In [2]:
import pandas as pd
import numpy as np

import matplotlib.pyplot as plt         
import matplotlib.dates as dates
from mpl_toolkits.mplot3d import Axes3D

from datetime import datetime

import statsmodels.formula.api as sm

from scipy.optimize import fsolve
from scipy import stats

import random
In [3]:
dateparser=lambda x: pd.datetime.strptime(x,'%d/%m/%Y')
df = pd.read_csv('Data for Jupyter Lab.csv', sep=',',decimal='.', parse_dates=['Date_NORWAY','Date_SWEDEN','Date_LIBOR'], date_parser= dateparser)
In [4]:
df
Out[4]:
t Date_NORWAY Price_NORWAY LN_NORWAY Date_SWEDEN Price_SWEDEN LN_SWEDEN Date_LIBOR Price_LIBOR LN_LIBOR
0 0 1989-01-25 6.6880 NaN 1986-11-18 131.155 NaN 1989-03-21 13.00000 NaN
1 1 1989-01-26 6.6900 0.000299 1986-11-19 132.042 0.006740 1989-03-22 12.87500 -0.009662
2 2 1989-01-27 6.7300 0.005961 1986-11-20 132.362 0.002421 1989-03-23 12.87500 0.000000
3 3 1989-01-30 6.7240 -0.000892 1986-11-21 129.812 -0.019453 1989-03-28 12.87500 0.000000
4 4 1989-01-31 6.7645 0.006005 1986-11-24 128.730 -0.008370 1989-03-29 12.93750 0.004843
5 5 1989-02-01 6.7500 -0.002146 1986-11-25 128.210 -0.004048 1989-03-30 12.87500 -0.004843
6 6 1989-02-02 6.7700 0.002959 1986-11-26 129.125 0.007111 1989-03-31 12.93750 0.004843
7 7 1989-02-03 6.7950 0.003686 1986-11-27 129.595 0.003633 1989-04-03 13.00000 0.004819
8 8 1989-02-06 6.7910 -0.000589 1986-11-28 128.000 -0.012384 1989-04-04 13.00000 0.000000
9 9 1989-02-07 6.7550 -0.005315 1986-12-01 125.232 -0.021862 1989-04-05 13.00000 0.000000
10 10 1989-02-08 6.7800 0.003694 1986-12-02 124.947 -0.002278 1989-04-06 13.06250 0.004796
11 11 1989-02-09 6.7330 -0.006956 1986-12-03 127.460 0.019913 1989-04-07 13.12500 0.004773
12 12 1989-02-10 6.7310 -0.000297 1986-12-04 126.450 -0.007956 1989-04-10 13.31250 0.014185
13 13 1989-02-13 6.7530 0.003263 1986-12-05 125.282 -0.009280 1989-04-11 13.31250 0.000000
14 14 1989-02-14 6.7050 -0.007133 1986-12-08 123.400 -0.015136 1989-04-12 13.37500 0.004684
15 15 1989-02-15 6.6900 -0.002240 1986-12-09 123.840 0.003559 1989-04-13 13.34375 -0.002339
16 16 1989-02-16 6.6960 0.000896 1986-12-10 123.210 -0.005100 1989-04-14 13.43750 0.007001
17 17 1989-02-17 6.6810 -0.002243 1986-12-11 125.300 0.016821 1989-04-17 13.18750 -0.018780
18 18 1989-02-21 6.7050 0.003586 1986-12-12 127.255 0.015482 1989-04-18 13.06250 -0.009524
19 19 1989-02-22 6.6900 -0.002240 1986-12-15 125.480 -0.014047 1989-04-19 13.06250 0.000000
20 20 1989-02-23 6.6750 -0.002245 1986-12-16 124.987 -0.003937 1989-04-20 13.00000 -0.004796
21 21 1989-02-24 6.6850 0.001497 1986-12-17 125.577 0.004709 1989-04-21 13.56250 0.042359
22 22 1989-02-27 6.6890 0.000598 1986-12-18 125.622 0.000358 1989-04-24 13.37500 -0.013921
23 23 1989-02-28 6.7090 0.002986 1986-12-19 126.225 0.004789 1989-04-25 13.37500 0.000000
24 24 1989-03-01 6.7390 0.004462 1986-12-22 125.495 -0.005800 1989-04-26 13.25000 -0.009390
25 25 1989-03-02 6.7420 0.000445 1986-12-23 125.007 -0.003896 1989-04-27 13.00000 -0.019048
26 26 1989-03-03 6.7360 -0.000890 1986-12-29 125.005 -0.000016 1989-04-28 12.93750 -0.004819
27 27 1989-03-06 6.7625 0.003926 1986-12-30 126.177 0.009332 1989-05-02 13.04688 0.008419
28 28 1989-03-07 6.7630 0.000074 1987-01-02 127.180 0.007918 1989-05-03 13.00000 -0.003600
29 29 1989-03-08 6.7700 0.001035 1987-01-05 127.992 0.006364 1989-05-04 12.93750 -0.004819
... ... ... ... ... ... ... ... ... ... ...
7531 7531 2019-01-16 8.5305 -0.002809 2016-11-21 1478.582 0.001427 2019-01-21 1.16638 -0.005344
7532 7532 2019-01-17 8.5554 0.002915 2016-11-22 1485.048 0.004364 2019-01-22 1.15875 -0.006563
7533 7533 2019-01-18 8.5509 -0.000526 2016-11-23 1482.635 -0.001626 2019-01-23 1.17088 0.010414
7534 7534 2019-01-22 8.5961 0.005272 2016-11-24 1484.330 0.001143 2019-01-24 1.16988 -0.000854
7535 7535 2019-01-23 8.5848 -0.001315 2016-11-25 1491.229 0.004637 2019-01-25 1.17363 0.003200
7536 7536 2019-01-24 8.5756 -0.001072 2016-11-28 1472.681 -0.012516 2019-01-28 1.17238 -0.001066
7537 7537 2019-01-25 8.5193 -0.006587 2016-11-29 1467.178 -0.003744 2019-01-29 1.16163 -0.009212
7538 7538 2019-01-28 8.5147 -0.000540 2016-11-30 1481.141 0.009472 2019-01-30 1.16438 0.002365
7539 7539 2019-01-29 8.5008 -0.001634 2016-12-01 1477.951 -0.002156 2019-01-31 1.16688 0.002145
7540 7540 2019-01-30 8.4725 -0.003335 2016-12-02 1470.083 -0.005338 2019-02-01 1.16950 0.002243
7541 7541 2019-01-31 8.4359 -0.004329 2016-12-05 1483.286 0.008941 2019-02-04 1.17663 0.006078
7542 7542 2019-02-01 8.4316 -0.000510 2016-12-06 1492.171 0.005972 2019-02-05 1.16413 -0.010680
7543 7543 2019-02-04 8.4700 0.004544 2016-12-07 1503.954 0.007866 2019-02-06 1.16988 0.004927
7544 7544 2019-02-05 8.4813 0.001333 2016-12-08 1520.148 0.010710 2019-02-07 1.16463 -0.004498
7545 7545 2019-02-06 8.5235 0.004963 2016-12-09 1534.273 0.009249 2019-02-08 1.14175 -0.019841
7546 7546 2019-02-07 8.5805 0.006665 2016-12-12 1537.925 0.002377 2019-02-11 1.13763 -0.003615
7547 7547 2019-02-08 8.6335 0.006158 2016-12-13 1550.998 0.008464 2019-02-12 1.13238 -0.004626
7548 7548 2019-02-11 8.7232 0.010336 2016-12-14 1541.388 -0.006215 2019-02-13 1.12975 -0.002325
7549 7549 2019-02-12 8.6525 -0.008138 2016-12-15 1550.516 0.005904 2019-02-14 1.12938 -0.000328
7550 7550 2019-02-13 8.6426 -0.001145 2016-12-16 1548.195 -0.001498 2019-02-15 1.12400 -0.004775
7551 7551 2019-02-14 8.6572 0.001688 2016-12-19 1542.709 -0.003550 2019-02-18 1.12575 0.001556
7552 7552 2019-02-15 8.6473 -0.001144 2016-12-20 1536.725 -0.003886 2019-02-19 1.12438 -0.001218
7553 7553 2019-02-19 8.5829 -0.007475 2016-12-21 1525.467 -0.007353 2019-02-20 1.12275 -0.001451
7554 7554 2019-02-21 8.6254 0.004939 2016-12-22 1519.539 -0.003894 2019-02-21 1.13850 0.013931
7555 7555 2019-02-22 8.6022 -0.002693 2016-12-23 1525.801 0.004113 2019-02-22 1.13813 -0.000325
7556 7556 2019-02-25 8.6093 0.000825 2016-12-27 1533.311 0.004910 2019-02-25 1.13650 -0.001433
7557 7557 2019-02-26 8.5781 -0.003631 2016-12-28 1527.304 -0.003925 2019-02-26 1.13425 -0.001982
7558 7558 2019-02-27 8.5433 -0.004065 2016-12-29 1518.365 -0.005870 2019-02-27 1.13463 0.000335
7559 7559 2019-02-28 8.5538 0.001228 2016-12-30 1517.197 -0.000770 2019-02-28 1.13925 0.004064
7560 7560 2019-03-01 8.5778 0.002802 2017-01-02 1526.826 0.006327 2019-03-01 1.13525 -0.003517

7561 rows × 10 columns

In [122]:
basic_statistics = pd.DataFrame()
basic_statistics['Norway']= [df.LN_NORWAY.mean(), df.LN_NORWAY.std(), stats.kurtosis(df.LN_NORWAY[1:-1])]
basic_statistics['Sweden']= [df.LN_SWEDEN.mean(), df.LN_SWEDEN.std(),stats.kurtosis(df.LN_SWEDEN[1:-1])]
basic_statistics['LIBOR']= [df.LN_LIBOR.mean(), df.LN_LIBOR.std(), stats.kurtosis(df.LN_LIBOR[1:-1])]

basic_statistics.rename(index = {0:"Mean", 1:"Standard Deviation", 2:"Kurtosis"}, inplace = True)

basic_statistics
Out[122]:
Norway Sweden LIBOR
Mean 0.000033 0.000325 -0.000322
Standard Deviation 0.007228 0.014584 0.008319
Kurtosis 4.279930 4.362480 86.640312
In [59]:
plt.grid(b=True)
plt.xlabel('Date')
plt.ylabel('Price (USD/NOK)')
plt.title("Price path for\nNorwegian exchange rate")

plt.fill_between(list(df.Date_NORWAY), df.Price_NORWAY, color="red", alpha=0.2)
Out[59]:
<matplotlib.collections.PolyCollection at 0x1a1f4212b0>
In [123]:
plt.grid(b=True)
plt.xlabel('Date')
plt.ylabel('Price (SEK)')
plt.title("Price path for\nSwedish stock market")


plt.fill_between(list(df.Date_SWEDEN), df.Price_SWEDEN, color="gold", alpha=0.2)
Out[123]:
<matplotlib.collections.PolyCollection at 0x1a20292d30>
In [6]:
plt.grid(b=True)
plt.xlabel('Date')
plt.ylabel('Annual Yield (%)')
plt.title("Yield path for\nLIBOR 12-month rate in British pounds")

plt.fill_between(list(df.Date_LIBOR), df.Price_LIBOR, color="blue", alpha=0.2)
Out[6]:
<matplotlib.collections.PolyCollection at 0x1a1f4507f0>
In [7]:
plt.figure(figsize=(24,5))
plt.grid(b=True)

plt.xlabel('Date')
plt.ylabel('Price change\n(%)')

plt.title("Natural-logs for\nNorwegian exchange rate")

plt.plot(df.Date_NORWAY, df.LN_NORWAY, color="red",linewidth=0.5)
plt.gca().set_yticklabels(['{:.0f}'.format(x*100) for x in plt.gca().get_yticks()])

plt.show()
In [8]:
plt.figure(figsize=(24,5))
plt.grid(b=True)

plt.xlabel('Date')
plt.ylabel('Price change\n(%)')

plt.title("Natural-logs for\nSwedish stock market")

plt.plot(df.Date_SWEDEN, df.LN_SWEDEN, color="gold",linewidth=0.5)
plt.gca().set_yticklabels(['{:.0f}'.format(x*100) for x in plt.gca().get_yticks()])

plt.show()
In [9]:
plt.figure(figsize=(24,5))
plt.grid(b=True)


plt.xlabel('Date')
plt.ylabel('Yield change\n(%)')

plt.title("Natural-logs for\nLIBOR 12-month rate in British pounds")

plt.plot(df.Date_LIBOR, df.LN_LIBOR, color="blue",linewidth=0.5)
plt.gca().set_yticklabels(['{:.0f}'.format(x*100) for x in plt.gca().get_yticks()])

plt.show()
In [10]:
print("Here we consider if the daily price-changes for our three markets are Normally distributed.\nTo do this we use two statistical tests.")
print("\nHere we show the resulting p-values.\n\nAs we can see, the odds of these markets returns being Normally distributed are practically zero.\n")

import warnings
warnings.simplefilter("ignore")

Statistical_Tests=[[stats.kstest(list(df.LN_NORWAY[1:-1]), 'norm').pvalue,
                    stats.kstest(list(df.LN_SWEDEN[1:-1]), 'norm').pvalue,
                    stats.kstest(list(df.LN_LIBOR[1:-1]), 'norm').pvalue] , 
                   
                   [stats.shapiro(list(df.LN_NORWAY[1:-1]))[1],
                    stats.shapiro(list(df.LN_SWEDEN[1:-1]))[1],
                    stats.shapiro(list(df.LN_LIBOR[1:-1]))[1]]]

Statistical_Tests = pd.DataFrame(Statistical_Tests, columns = ['Norway', 'Sweden', 'LIBOR'], index = ['Kolmogorov-Smirnov', 'Shapiro-Wilk'])
Statistical_Tests
Here we consider if the daily price-changes for our three markets are Normally distributed.
To do this we use two statistical tests.

Here we show the resulting p-values.

As we can see, the odds of these markets returns being Normally distributed are practically zero.

Out[10]:
Norway Sweden LIBOR
Kolmogorov-Smirnov 0.000000e+00 0.000000e+00 0.0
Shapiro-Wilk 3.227670e-39 2.522337e-44 0.0
In [10]:
print("To begin the multifractal analysis, we must first define our Xt variables.\nIt is useful to have a function for this.")

Xt= lambda P: np.log(P) - np.log(P[0])

df['Xt_NORWAY']= Xt(df.Price_NORWAY)
df['Xt_SWEDEN']= Xt(df.Price_SWEDEN)
df['Xt_LIBOR']= Xt(df.Price_LIBOR)
To begin the multifractal analysis, we must first define our Xt variables.
It is useful to have a function for this.
In [11]:
df[['Price_NORWAY','LN_NORWAY','Xt_NORWAY','Price_SWEDEN','LN_SWEDEN','Xt_SWEDEN','Price_LIBOR','LN_LIBOR','Xt_LIBOR']]
Out[11]:
Price_NORWAY LN_NORWAY Xt_NORWAY Price_SWEDEN LN_SWEDEN Xt_SWEDEN Price_LIBOR LN_LIBOR Xt_LIBOR
0 6.6880 NaN 0.000000 131.155 NaN 0.000000 13.00000 NaN 0.000000
1 6.6900 0.000299 0.000299 132.042 0.006740 0.006740 12.87500 -0.009662 -0.009662
2 6.7300 0.005961 0.006260 132.362 0.002421 0.009161 12.87500 0.000000 -0.009662
3 6.7240 -0.000892 0.005368 129.812 -0.019453 -0.010293 12.87500 0.000000 -0.009662
4 6.7645 0.006005 0.011373 128.730 -0.008370 -0.018663 12.93750 0.004843 -0.004819
5 6.7500 -0.002146 0.009228 128.210 -0.004048 -0.022710 12.87500 -0.004843 -0.009662
6 6.7700 0.002959 0.012186 129.125 0.007111 -0.015599 12.93750 0.004843 -0.004819
7 6.7950 0.003686 0.015872 129.595 0.003633 -0.011966 13.00000 0.004819 0.000000
8 6.7910 -0.000589 0.015283 128.000 -0.012384 -0.024350 13.00000 0.000000 0.000000
9 6.7550 -0.005315 0.009968 125.232 -0.021862 -0.046212 13.00000 0.000000 0.000000
10 6.7800 0.003694 0.013662 124.947 -0.002278 -0.048490 13.06250 0.004796 0.004796
11 6.7330 -0.006956 0.006706 127.460 0.019913 -0.028577 13.12500 0.004773 0.009569
12 6.7310 -0.000297 0.006409 126.450 -0.007956 -0.036533 13.31250 0.014185 0.023754
13 6.7530 0.003263 0.009672 125.282 -0.009280 -0.045813 13.31250 0.000000 0.023754
14 6.7050 -0.007133 0.002539 123.400 -0.015136 -0.060949 13.37500 0.004684 0.028438
15 6.6900 -0.002240 0.000299 123.840 0.003559 -0.057389 13.34375 -0.002339 0.026099
16 6.6960 0.000896 0.001195 123.210 -0.005100 -0.062490 13.43750 0.007001 0.033100
17 6.6810 -0.002243 -0.001047 125.300 0.016821 -0.045669 13.18750 -0.018780 0.014320
18 6.7050 0.003586 0.002539 127.255 0.015482 -0.030187 13.06250 -0.009524 0.004796
19 6.6900 -0.002240 0.000299 125.480 -0.014047 -0.044233 13.06250 0.000000 0.004796
20 6.6750 -0.002245 -0.001946 124.987 -0.003937 -0.048170 13.00000 -0.004796 0.000000
21 6.6850 0.001497 -0.000449 125.577 0.004709 -0.043461 13.56250 0.042359 0.042359
22 6.6890 0.000598 0.000150 125.622 0.000358 -0.043102 13.37500 -0.013921 0.028438
23 6.7090 0.002986 0.003135 126.225 0.004789 -0.038314 13.37500 0.000000 0.028438
24 6.7390 0.004462 0.007597 125.495 -0.005800 -0.044114 13.25000 -0.009390 0.019048
25 6.7420 0.000445 0.008042 125.007 -0.003896 -0.048010 13.00000 -0.019048 0.000000
26 6.7360 -0.000890 0.007151 125.005 -0.000016 -0.048026 12.93750 -0.004819 -0.004819
27 6.7625 0.003926 0.011078 126.177 0.009332 -0.038694 13.04688 0.008419 0.003600
28 6.7630 0.000074 0.011152 127.180 0.007918 -0.030776 13.00000 -0.003600 0.000000
29 6.7700 0.001035 0.012186 127.992 0.006364 -0.024412 12.93750 -0.004819 -0.004819
... ... ... ... ... ... ... ... ... ...
7531 8.5305 -0.002809 0.243333 1478.582 0.001427 2.422459 1.16638 -0.005344 -2.411044
7532 8.5554 0.002915 0.246248 1485.048 0.004364 2.426823 1.15875 -0.006563 -2.417608
7533 8.5509 -0.000526 0.245722 1482.635 -0.001626 2.425196 1.17088 0.010414 -2.407194
7534 8.5961 0.005272 0.250994 1484.330 0.001143 2.426339 1.16988 -0.000854 -2.408048
7535 8.5848 -0.001315 0.249678 1491.229 0.004637 2.430976 1.17363 0.003200 -2.404848
7536 8.5756 -0.001072 0.248606 1472.681 -0.012516 2.418460 1.17238 -0.001066 -2.405913
7537 8.5193 -0.006587 0.242019 1467.178 -0.003744 2.414716 1.16163 -0.009212 -2.415125
7538 8.5147 -0.000540 0.241479 1481.141 0.009472 2.424188 1.16438 0.002365 -2.412761
7539 8.5008 -0.001634 0.239845 1477.951 -0.002156 2.422032 1.16688 0.002145 -2.410616
7540 8.4725 -0.003335 0.236511 1470.083 -0.005338 2.416694 1.16950 0.002243 -2.408373
7541 8.4359 -0.004329 0.232182 1483.286 0.008941 2.425635 1.17663 0.006078 -2.402295
7542 8.4316 -0.000510 0.231672 1492.171 0.005972 2.431608 1.16413 -0.010680 -2.412975
7543 8.4700 0.004544 0.236216 1503.954 0.007866 2.439473 1.16988 0.004927 -2.408048
7544 8.4813 0.001333 0.237549 1520.148 0.010710 2.450183 1.16463 -0.004498 -2.412546
7545 8.5235 0.004963 0.242512 1534.273 0.009249 2.459432 1.14175 -0.019841 -2.432387
7546 8.5805 0.006665 0.249177 1537.925 0.002377 2.461810 1.13763 -0.003615 -2.436002
7547 8.6335 0.006158 0.255335 1550.998 0.008464 2.470274 1.13238 -0.004626 -2.440628
7548 8.7232 0.010336 0.265671 1541.388 -0.006215 2.464059 1.12975 -0.002325 -2.442953
7549 8.6525 -0.008138 0.257533 1550.516 0.005904 2.469963 1.12938 -0.000328 -2.443281
7550 8.6426 -0.001145 0.256389 1548.195 -0.001498 2.468465 1.12400 -0.004775 -2.448056
7551 8.6572 0.001688 0.258076 1542.709 -0.003550 2.464915 1.12575 0.001556 -2.446500
7552 8.6473 -0.001144 0.256932 1536.725 -0.003886 2.461029 1.12438 -0.001218 -2.447718
7553 8.5829 -0.007475 0.249457 1525.467 -0.007353 2.453676 1.12275 -0.001451 -2.449168
7554 8.6254 0.004939 0.254396 1519.539 -0.003894 2.449782 1.13850 0.013931 -2.435238
7555 8.6022 -0.002693 0.251703 1525.801 0.004113 2.453895 1.13813 -0.000325 -2.435563
7556 8.6093 0.000825 0.252528 1533.311 0.004910 2.458805 1.13650 -0.001433 -2.436996
7557 8.5781 -0.003631 0.248898 1527.304 -0.003925 2.454880 1.13425 -0.001982 -2.438978
7558 8.5433 -0.004065 0.244832 1518.365 -0.005870 2.449010 1.13463 0.000335 -2.438643
7559 8.5538 0.001228 0.246061 1517.197 -0.000770 2.448240 1.13925 0.004064 -2.434579
7560 8.5778 0.002802 0.248863 1526.826 0.006327 2.454567 1.13525 -0.003517 -2.438096

7561 rows × 9 columns

In [13]:
plt.grid(b=True)
plt.xlabel('t\n(time in days)')
plt.ylabel('Xt \n(natural-log change since time 0)')

plt.title("Growth over time for Norwegian exchange rate")

plt.fill_between(list(df.t), df.Xt_NORWAY, color="red", alpha=0.2)
Out[13]:
<matplotlib.collections.PolyCollection at 0x1a1f8a2e48>
In [14]:
plt.grid(b=True)
plt.xlabel('t\n(time in days)')
plt.ylabel('Xt \n(natural-log change since time 0)')

plt.title("Growth over time for Swedish stock market")

plt.fill_between(list(df.t), df.Xt_SWEDEN, color="gold", alpha=0.2)
Out[14]:
<matplotlib.collections.PolyCollection at 0x1a1f902748>
In [15]:
plt.grid(b=True)
plt.xlabel('t\n(time in days)')
plt.ylabel('Xt \n(natural-log change since time 0)')

plt.title("Growth over time for British LIBOR")

plt.fill_between(list(df.t), df.Xt_LIBOR, color="blue", alpha=0.2)
Out[15]:
<matplotlib.collections.PolyCollection at 0x1a1fc29ac8>
In [12]:
print("We will be using a HIGHLY COMPOSITE NUMBER - 7560 - as our number of observations. \nThis is because this number has a lot of factors (numbers that can divide 7560 and yield a whole number).\n \nWe will call our time increments 'delta_t'.\n \nThe number of time increments - delta_t's - that we use in the analysis is the same as the number of factors. ")

delta_t=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 15, 18, 20, 21, 24, 27, 28, 30, 35, 36, 40, 42, 45, 54, 56, 60, 63, 70, 72, 84, 90, 105, 108, 120, 126, 135, 140, 168, 180, 189, 210, 216, 252, 270, 280, 315, 360, 378, 420, 504, 540, 630, 756, 840, 945, 1080, 1260, 1512, 1890, 2520, 3780, 7560)
We will be using a HIGHLY COMPOSITE NUMBER - 7560 - as our number of observations. 
This is because this number has a lot of factors (numbers that can divide 7560 and yield a whole number).
 
We will call our time increments 'delta_t'.
 
The number of time increments - delta_t's - that we use in the analysis is the same as the number of factors. 
In [13]:
print("The number of delta_t's is: " + str(len(delta_t)))
The number of delta_t's is: 64
In [14]:
print("Here are all the delta_t's (the factors of 7560):\n")

for i in range (0,int(len(delta_t)/2)):
    print("Factor " + str(i+1) + ": 7560 divided by " + str(delta_t[i]) + " is " + str(int(7560/delta_t[i])))
    
print("\n...\n \netc. \n \nAfter factor 32, the next factors are simply the results of the \ndivisions (90, 105, 108 etc. up to 7560), for a total of 64 factors.")
Here are all the delta_t's (the factors of 7560):

Factor 1: 7560 divided by 1 is 7560
Factor 2: 7560 divided by 2 is 3780
Factor 3: 7560 divided by 3 is 2520
Factor 4: 7560 divided by 4 is 1890
Factor 5: 7560 divided by 5 is 1512
Factor 6: 7560 divided by 6 is 1260
Factor 7: 7560 divided by 7 is 1080
Factor 8: 7560 divided by 8 is 945
Factor 9: 7560 divided by 9 is 840
Factor 10: 7560 divided by 10 is 756
Factor 11: 7560 divided by 12 is 630
Factor 12: 7560 divided by 14 is 540
Factor 13: 7560 divided by 15 is 504
Factor 14: 7560 divided by 18 is 420
Factor 15: 7560 divided by 20 is 378
Factor 16: 7560 divided by 21 is 360
Factor 17: 7560 divided by 24 is 315
Factor 18: 7560 divided by 27 is 280
Factor 19: 7560 divided by 28 is 270
Factor 20: 7560 divided by 30 is 252
Factor 21: 7560 divided by 35 is 216
Factor 22: 7560 divided by 36 is 210
Factor 23: 7560 divided by 40 is 189
Factor 24: 7560 divided by 42 is 180
Factor 25: 7560 divided by 45 is 168
Factor 26: 7560 divided by 54 is 140
Factor 27: 7560 divided by 56 is 135
Factor 28: 7560 divided by 60 is 126
Factor 29: 7560 divided by 63 is 120
Factor 30: 7560 divided by 70 is 108
Factor 31: 7560 divided by 72 is 105
Factor 32: 7560 divided by 84 is 90

...
 
etc. 
 
After factor 32, the next factors are simply the results of the 
divisions (90, 105, 108 etc. up to 7560), for a total of 64 factors.
In [15]:
print("Let us now choose the values of q - the statistical moments - that we will use for our partition function.")
print("\n(Recall that q=1 is the mean, q=2 is the variance, q=4 is the kurtosis etc.")
print("Well... almost... in fact, only q=1 is as the mean is correct. The other 'raw' moments need to be normalized.\nBut we don't need that distinction here.)")
print("\nMandelbrot et al (1997) chose approximately 99 values for q, mostly concentrated around the low values between 0 and 5.\nWe shall try to do something similar.")


q=[0.01, 0.1, 0.2, 0.3, 0.4, 
   0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 
   1.0, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 
   1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 
   1.8, 1.81, 1.82, 1.83, 1.84, 1.85, 1.86, 1.87, 1.88, 1.89, 
   1.9, 1.91, 1.92, 1.93, 1.94, 1.95, 1.96, 1.97, 1.98, 1.985, 1.99, 
   1.991, 1.992, 1.993, 1.994, 1.995, 1.996, 1.997, 1.998, 1.999, 
   2.0, 
   2.001, 2.002, 2.003, 2.004, 2.005, 2.006, 2.007, 2.008, 2.009, 
   2.01, 2.015, 2.02, 2.025, 
   2.03, 2.04, 2.05, 2.06, 2.07, 2.08, 2.09, 
   2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5, 
   2.6, 2.7, 2.8, 2.9, 
   3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 
   4.0, 4.5, 
   5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 
   12.5, 15.0, 17.5, 20.0, 22.5, 25.0, 27.5, 
   30.0]


print("\nThe number of q's that we will use is " + str(len(q)) + ".")
Let us now choose the values of q - the statistical moments - that we will use for our partition function.

(Recall that q=1 is the mean, q=2 is the variance, q=4 is the kurtosis etc.
Well... almost... in fact, only q=1 is as the mean is correct. The other 'raw' moments need to be normalized.
But we don't need that distinction here.)

Mandelbrot et al (1997) chose approximately 99 values for q, mostly concentrated around the low values between 0 and 5.
We shall try to do something similar.

The number of q's that we will use is 121.
In [16]:
plt.figure(figsize=(12,3))
plt.grid(b=True)

plt.title("The distribution of q's used for the Partition Function (Sq(t,delta_t))")
plt.xlabel("Index of q\n(Total " + str(len(q)) + ")")
plt.ylabel("q\n(the 'raw' moment)")

plt.plot(q, marker='x')
Out[16]:
[<matplotlib.lines.Line2D at 0x1a51b3e358>]
In [17]:
print("q is:\n\n" + str(q))
q is:

[0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1.0, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.81, 1.82, 1.83, 1.84, 1.85, 1.86, 1.87, 1.88, 1.89, 1.9, 1.91, 1.92, 1.93, 1.94, 1.95, 1.96, 1.97, 1.98, 1.985, 1.99, 1.991, 1.992, 1.993, 1.994, 1.995, 1.996, 1.997, 1.998, 1.999, 2.0, 2.001, 2.002, 2.003, 2.004, 2.005, 2.006, 2.007, 2.008, 2.009, 2.01, 2.015, 2.02, 2.025, 2.03, 2.04, 2.05, 2.06, 2.07, 2.08, 2.09, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5, 2.6, 2.7, 2.8, 2.9, 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.5, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 12.5, 15.0, 17.5, 20.0, 22.5, 25.0, 27.5, 30.0]
In [18]:
print("The number of q's between 1 and 3 is: " + str(sum(float(num) <= 3 and float(num) >=1 for num in q)) +"\n")

print("The number of q's less than 1 is: " + str(sum(float(num) <= 1 for num in q)))
print("The number of q's greater than 3 is: " + str(sum(float(num) >= 3 for num in q)) +"\n")

print("Total q's: " + str(len(q)))
The number of q's between 1 and 3 is: 81

The number of q's less than 1 is: 16
The number of q's greater than 3 is: 26

Total q's: 121
In [19]:
print("The next step is to figure out the partition function - Sq(T,delta_t) - for different values of delta_t and q.")
print("The partition function Sq(T,delta_t) will be used to estimate the scaling function tau(q).")


def partition_function(SIGMA, DELTA, XT, Q):
    print("Calculating the partition function...\nThis step will take quite a while... so strap yourself in...\n")
    SIGMA=[[0 for x in range(len(DELTA))] for y in range(len(Q))]
    for k in range (0, len(Q)):
        if k%30==0:
            print("calculating i=" + str(k) + ' out of ' + str(len(Q)-1))
        for j in range (0,len(DELTA)):
            for i in range (0,len(XT)-1):
                if i < int(len(XT)/DELTA[j]):
                    SIGMA[k][j]=SIGMA[k][j] + abs(XT[i*DELTA[j]+DELTA[j]]-XT[i*DELTA[j]])**Q[k]

    SIGMA=pd.DataFrame(SIGMA)
    
    for i in range (0,len(Q)):
        SIGMA.rename(index={SIGMA.index[i]:Q[i]}, inplace=True)
    for i in range (len(DELTA)-1,-1,-1):
        SIGMA.rename(columns={SIGMA.columns[i]:DELTA[i]}, inplace=True)
    
    print("Done! Your partition function is ready!\n")
    return SIGMA


print("\nHere we have defined our partition function in the form: \npartition_function(SIGMA, DELTA, XT, Q).")
The next step is to figure out the partition function - Sq(T,delta_t) - for different values of delta_t and q.
The partition function Sq(T,delta_t) will be used to estimate the scaling function tau(q).

Here we have defined our partition function in the form: 
partition_function(SIGMA, DELTA, XT, Q).
In [20]:
print("Here, we will now begin calculating the partition functions for all our markets.\n")

partition_NORWAY=[[]]
partition_SWEDEN=[[]]
partition_LIBOR=[[]]

partition_NORWAY=partition_function(partition_NORWAY, delta_t, df.Xt_NORWAY, q)
partition_SWEDEN=partition_function(partition_SWEDEN, delta_t, df.Xt_SWEDEN, q)
partition_LIBOR=partition_function(partition_LIBOR, delta_t, df.Xt_LIBOR, q)

print("\nDone! You can now proceed to estimating the scaling function - tau(q).")
Here, we will now begin calculating the partition functions for all our markets.

Calculating the partition function...
This step will take quite a while... so strap yourself in...

calculating i=0 out of 120
calculating i=30 out of 120
calculating i=60 out of 120
calculating i=90 out of 120
calculating i=120 out of 120
Done! Your partition function is ready!

Calculating the partition function...
This step will take quite a while... so strap yourself in...

calculating i=0 out of 120
calculating i=30 out of 120
calculating i=60 out of 120
calculating i=90 out of 120
calculating i=120 out of 120
Done! Your partition function is ready!

Calculating the partition function...
This step will take quite a while... so strap yourself in...

calculating i=0 out of 120
calculating i=30 out of 120
calculating i=60 out of 120
calculating i=90 out of 120
calculating i=120 out of 120
Done! Your partition function is ready!


Done! You can now proceed to estimating the scaling function - tau(q).
In [21]:
for i in range (0, len(q)):
    plt.plot(np.log(delta_t), np.log(list(partition_NORWAY.iloc[i])/partition_NORWAY[1][q[i]]), color="red",linewidth=0.5, label=str(q[i]))


plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
           ncol=6, mode="expand", borderaxespad=0.)

plt.xlabel('ln (delta_t)\n(the natural log of time increments)')
plt.ylabel('ln ( Sq(delta_t) )\n(the natural log of the partition function)')

plt.show()
In [22]:
for i in range (0, len(q)):
    plt.plot(np.log(delta_t), np.log(list(partition_SWEDEN.iloc[i])/partition_SWEDEN[1][q[i]]), color="gold",linewidth=0.5, label=str(q[i]))


plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
           ncol=6, mode="expand", borderaxespad=0.)

plt.xlabel('ln (delta_t)\n(the natural log of time increments)')
plt.ylabel('ln ( Sq(delta_t) )\n(the natural log of the partition function)')

plt.show()
In [23]:
for i in range (0, len(q)):
    plt.plot(np.log(delta_t), np.log(list(partition_LIBOR.iloc[i])/partition_LIBOR[1][q[i]]), color='blue',linewidth=0.5, label=str(q[i]))


plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
           ncol=6, mode="expand", borderaxespad=0.)

plt.xlabel('ln (delta_t)\n(the natural log of time increments)')
plt.ylabel('ln ( Sq(delta_t) )\n(the natural log of the partition function)')

plt.show()
In [ ]:
 
In [24]:
for i in range (0, len(q)):
    if q[i]%0.25==0 and q[i]<6 and q[i]>1 and q[i]!=3.5 and q[i]!=4.5:
        plt.plot(np.log(delta_t), np.log(list(partition_NORWAY.iloc[i])/partition_NORWAY[1][q[i]]), color="red", linewidth=1.5, label='q='+str(q[i]))


plt.legend(bbox_to_anchor=(0., -0.45, 1., .102), loc=3,
           ncol=5, mode="expand", borderaxespad=0., title="Raw moments")

plt.title("Parition function for Norway")
plt.xlabel('ln (delta_t)\n(the natural log of time increments)')
plt.ylabel('ln ( Sq(delta_t) )\n(the natural log of the partition function)')

plt.show()
In [25]:
for i in range (0, len(q)):
    if q[i]%0.25==0 and q[i]<6 and q[i]>1 and q[i]!=3.5 and q[i]!=4.5:
        plt.plot(np.log(delta_t), np.log(list(partition_SWEDEN.iloc[i])/partition_SWEDEN[1][q[i]]), color="gold",linewidth=1.5, label='q='+str(q[i]))


plt.legend(bbox_to_anchor=(0., -0.45, 1., .102), loc=3,
           ncol=5, mode="expand", borderaxespad=0., title="Raw moments")

plt.title("Parition function for Sweden")
plt.xlabel('ln (delta_t)\n(the natural log of time increments)')
plt.ylabel('ln ( Sq(delta_t) )\n(the natural log of the partition function)')

plt.show()
In [26]:
for i in range (0, len(q)):
    if q[i]%0.25==0 and q[i]<6 and q[i]>1 and q[i]!=3.5 and q[i]!=4.5:
        plt.plot(np.log(delta_t), np.log(list(partition_LIBOR.iloc[i])/partition_LIBOR[1][q[i]]), color="blue",linewidth=1.5, label='q='+str(q[i]))


plt.legend(bbox_to_anchor=(0., -0.45, 1., .102), loc=3,
           ncol=5, mode="expand", borderaxespad=0., title="Raw moments")

plt.title("Parition function for LIBOR")
plt.xlabel('ln (delta_t)\n(the natural log of time increments)')
plt.ylabel('ln ( Sq(delta_t) )\n(the natural log of the partition function)')

plt.show()
In [27]:
tau_regression=pd.DataFrame(np.log(delta_t))
tau_regression['LN_DELTA']=pd.DataFrame(np.log(delta_t))
tau_regression['LN_T']=[np.log(7560) for x in range(len(delta_t))]

def scaling_function(TAU_Q, PARTITION, Q):
    TAU_Q=[0 for x in range(len(Q))]
    
    for i in range(0,len(Q)):
        TAU_Q[i]=((sm.OLS(endog=np.log(list(PARTITION.iloc[i]/PARTITION[1][Q[i]])), exog = tau_regression[['LN_DELTA','LN_T']], missing='drop')).fit()).params[0]
    
    TAU_Q=pd.DataFrame(TAU_Q)

    return TAU_Q
    
print("Here we have defined how to find the estimated scaling function tau_q. \n\nWe are now ready to estimate tau_q for each market.")
Here we have defined how to find the estimated scaling function tau_q. 

We are now ready to estimate tau_q for each market.
In [28]:
tau_q_NORWAY=0
tau_q_SWEDEN=0
tau_q_LIBOR=0

tau_q_NORWAY = scaling_function(tau_q_NORWAY, partition_NORWAY, q)
tau_q_SWEDEN = scaling_function(tau_q_SWEDEN, partition_SWEDEN, q)
tau_q_LIBOR = scaling_function(tau_q_LIBOR, partition_LIBOR, q)

hypothetical_tau_q = [0.5*x -1 for x in q]
In [29]:
plt.grid(b=True)
plt.title("The scaling function for tau_q_NORWAY")

plt.xlabel("q\n(the 'raw' moment)")
plt.ylabel("tau(q)\n(the 'scaling function')")

axes = plt.gca()
axes.set_xlim([-0.5,8.5])
axes.set_ylim([-1,2.5])

plt.plot(q[0:111], tau_q_NORWAY[0:111], color="red",linewidth=3, marker="x", label="observed tau(q)")
plt.plot(q[0:111], hypothetical_tau_q[0:111], color="black", linestyle='--', label="hypothetical monofractal")
plt.legend()
plt.show()
In [30]:
plt.grid(b=True)
plt.title("The scaling function for tau_q_SWEDEN")


plt.xlabel("q\n(the 'raw' moment)")
plt.ylabel("tau(q)\n(the 'scaling function')")

axes = plt.gca()
axes.set_xlim([-0.5,8.5])
axes.set_ylim([-1,2.5])

plt.plot(q[0:111], tau_q_SWEDEN[0:111], color="gold",linewidth=3, marker="x", label="observed tau(q)")
plt.plot(q[0:111], hypothetical_tau_q[0:111], color="black", linestyle='--', label="hypothetical monofractal")
plt.legend()
plt.show()
In [31]:
plt.grid(b=True)
plt.title("The scaling function for tau_q_LIBOR")

plt.xlabel("q\n(the 'raw' moment)")
plt.ylabel("tau(q)\n(the 'scaling function')")

axes = plt.gca()
axes.set_xlim([-0.5,8.5])
axes.set_ylim([-1,2.5])

plt.plot(q[0:111], tau_q_LIBOR[0:111], color="blue",linewidth=3, marker="x", label="observed tau(q)")
plt.plot(q[0:111], hypothetical_tau_q[0:111], color="black", linestyle='--', label="hypothetical monofractal")
plt.legend()
plt.show()
In [32]:
print("Our intercept is somewhere between:")
print(str(tau_q_NORWAY[0][85])+ " for q=" +str(q[85]))
print(str(tau_q_NORWAY[0][86])+ " for q=" +str(q[86]))

print("\nTherefore NORWAY's H is " + str(round(1/q[86], 3)) + " < H < " + str(round(1/q[85], 3)))
Our intercept is somewhere between:
-0.02323130468519012 for q=2.25
-0.0037307426913569808 for q=2.3

Therefore NORWAY's H is 0.435 < H < 0.444
In [33]:
print("Our intercept is somewhere between:")
print(str(tau_q_SWEDEN[0][35])+ " for q=" +str(q[35]))
print(str(tau_q_SWEDEN[0][36])+ " for q=" +str(q[36]))

print("\nTherefore Sweden's H is " + str(round(1/q[36], 3)) + " < H < " + str(round(1/q[35], 3)))
Our intercept is somewhere between:
-0.0031340307339489115 for q=1.84
0.0017816021153757647 for q=1.85

Therefore Sweden's H is 0.541 < H < 0.543
In [34]:
print("Our intercept is somewhere between:")
print(str(tau_q_LIBOR[0][26])+ " for q=" +str(q[26]))
print(str(tau_q_LIBOR[0][27])+ " for q=" +str(q[27]))

print("\nTherefore the UK LIBOR's H is " + str(round(1/q[27], 3)) + " < H < " + str(round(1/q[26], 3)))
Our intercept is somewhere between:
-0.0014986305158324405 for q=1.55
0.02664349951922626 for q=1.6

Therefore the UK LIBOR's H is 0.625 < H < 0.645
In [35]:
max_q=105

print("Here we have defined the highest moment q which we will use for estimating the multifractal spectrum.")
Here we have defined the highest moment q which we will use for estimating the multifractal spectrum.
In [36]:
print("We have seen that the partition function Sq(T,delta_t) for higher moments is less robust.\nSo in estimating our scaling function, we will restrict ourselves to lower moments.")
print("\nHere, we will only use moments up to q=" + str(q[max_q]) + " to estimate the scaling function tau(q) by non-linear regression.")
print("\nNext, using the parameters that we obtained from regression, we will perform\na Legendre transformation on tau(q) to estimate the multifractal spectrum f(a).")


def estimate_multifractal_spectrum(TAU_Q, Q, MIN_Q, MAX_Q):
    TAU_Q_ESTIMATED = np.polyfit(Q[MIN_Q:MAX_Q], TAU_Q[MIN_Q:MAX_Q], 2)
    
    F_A = [0 for x in range(len(q)-10)]
    p = [0 for x in range(len(q)-10)]
    
    a = TAU_Q_ESTIMATED[0][0]
    b = TAU_Q_ESTIMATED[1][0]
    c = TAU_Q_ESTIMATED[2][0]
    
    for i in range(0, len(q)-10):
        p[i] = 2*a*Q[i]+b
        F_A[i] = ((p[i]-b)/(2*a))*p[i] - (a*((p[i]-b)/(2*a))**2 + b*((p[i]-b)/(2*a)) + c)
    
    F_A = pd.DataFrame(F_A)
    F_A.rename(columns={F_A.columns[0]:"f(a)"}, inplace=True)
    F_A['p'] = p
    
    print("Using the range of q's from " + str(Q[MIN_Q]) + " to " + str(Q[MAX_Q]) + ":")
    print("The estimated parameters for tau(q) are: \n" + str(TAU_Q_ESTIMATED))
    print("\nThus, the estimated parameters for f(a) are: \n" + str(1/(4*a)) + ", \n"  + str((-2*b)/(4*a)) + ", \n"+ str((-4*a*c+b**2)/(4*a)))
    
    return F_A
We have seen that the partition function Sq(T,delta_t) for higher moments is less robust.
So in estimating our scaling function, we will restrict ourselves to lower moments.

Here, we will only use moments up to q=4.0 to estimate the scaling function tau(q) by non-linear regression.

Next, using the parameters that we obtained from regression, we will perform
a Legendre transformation on tau(q) to estimate the multifractal spectrum f(a).
In [37]:
f_a_NORWAY = estimate_multifractal_spectrum(tau_q_NORWAY, q, 0, max_q)

print ("\nf_a_NORWAY[f(a)][0] is: " + str(f_a_NORWAY['f(a)'][0]))
Using the range of q's from 0.01 to 4.0:
The estimated parameters for tau(q) are: 
[[-0.02119676]
 [ 0.48380015]
 [-1.00559628]]

Thus, the estimated parameters for f(a) are: 
-11.794255562849537, 
11.412125260624025, 
-1.7549976874789843

f_a_NORWAY[f(a)][0] is: 1.0055941589110933
In [38]:
f_a_SWEDEN = estimate_multifractal_spectrum(tau_q_SWEDEN, q, 0, max_q)


print ("\nf_a_SWEDEN[f(a)][0] is: " + str(f_a_SWEDEN['f(a)'][0]))
Using the range of q's from 0.01 to 4.0:
The estimated parameters for tau(q) are: 
[[-0.02687856]
 [ 0.59061189]
 [-0.99888873]]

Thus, the estimated parameters for f(a) are: 
-9.301094482632855, 
10.98667400844778, 
-2.245541428374209

f_a_SWEDEN[f(a)][0] is: 0.9988860417823353
In [39]:
f_a_LIBOR = estimate_multifractal_spectrum(tau_q_LIBOR, q, 0, max_q)

print ("\nf_a_LIBOR[f(a)][0] is: " + str(f_a_LIBOR['f(a)'][0]))
Using the range of q's from 0.01 to 4.0:
The estimated parameters for tau(q) are: 
[[-0.0544081 ]
 [ 0.72757624]
 [-0.99711442]]

Thus, the estimated parameters for f(a) are: 
-4.594904082944945, 
6.686286081872329, 
-1.4352770263190417

f_a_LIBOR[f(a)][0] is: 0.9971089800927976
In [40]:
plt.grid(b=True)
plt.title("The multifractal spectrum for f_a_NORWAY")

plt.xlabel("a\n(the Hölder exponent)")
plt.ylabel('f(a)\n(the multifractal spectrum)')

# axes = plt.gca()
# axes.set_xlim([0.2,0.7])
# axes.set_ylim([0,1.1])

plt.plot(f_a_NORWAY['p'],f_a_NORWAY['f(a)'], color="red",linewidth=5)
Out[40]:
[<matplotlib.lines.Line2D at 0x1a1cd0e438>]
In [41]:
plt.grid(b=True)
plt.title("The multifractal spectrum for f_a_SWEDEN")

plt.xlabel("a\n(the Hölder exponent)")
plt.ylabel('f(a)\n(the multifractal spectrum)')

# axes = plt.gca()
# axes.set_xlim([0.2,0.8])
# axes.set_ylim([0,1.1])

plt.plot(f_a_SWEDEN['p'],f_a_SWEDEN['f(a)'], color="gold",linewidth=5)
Out[41]:
[<matplotlib.lines.Line2D at 0x1a1cc07a90>]
In [42]:
plt.grid(b=True)
plt.title("The multifractal spectrum for f_a_LIBOR")

plt.xlabel("a\n(the Hölder exponent)")
plt.ylabel('f(a)\n(the multifractal spectrum)')

# axes = plt.gca()
# axes.set_xlim([0.2,0.8])
# axes.set_ylim([0,1.1])

plt.plot(f_a_LIBOR['p'],f_a_LIBOR['f(a)'], color="blue",linewidth=5)
Out[42]:
[<matplotlib.lines.Line2D at 0x1a1cab2588>]
In [43]:
print("Using the parameters for our tau(q)'s that we estimated by non-linear regression, \nwe can calculate precise values for H:")
Using the parameters for our tau(q)'s that we estimated by non-linear regression, 
we can calculate precise values for H:
In [44]:
def estimate_H(TAU_Q, Q, MIN_Q, MAX_Q):
    TAU_Q_ESTIMATED = np.polyfit(Q[MIN_Q:MAX_Q], TAU_Q[MIN_Q:MAX_Q], 2)
    
    def f(x):
        return TAU_Q_ESTIMATED[0][0]*x**2 + TAU_Q_ESTIMATED[1][0]*x + TAU_Q_ESTIMATED[2][0]
    
    temp = fsolve(f, [0,4])
    
    H = 1/temp
    return H[0]

print("Here we have defined our function for precisely estimating H.")
Here we have defined our function for precisely estimating H.
In [45]:
H_NORWAY = estimate_H(tau_q_NORWAY, q, 0, max_q)
H_SWEDEN = estimate_H(tau_q_SWEDEN, q, 0, max_q)
H_LIBOR = estimate_H(tau_q_LIBOR, q, 0, max_q)

print("Here, we calculate our H-values to be:\n")
print("For NORWAY: H = " + str(H_NORWAY))
print("For Sweden: H = " + str(H_SWEDEN))
print("For LIBOR: H = " + str(H_LIBOR))
Here, we calculate our H-values to be:

For NORWAY: H = 0.43235420116535195
For Sweden: H = 0.5415842332894015
For LIBOR: H = 0.6450967232546713
In [46]:
print("Thus, using our estimated multifractal spectra, we get that:")
print("\nNORWAY's estimated alpha zero = " + str(f_a_NORWAY['p'][0]))
print("\nSweden's estimated alpha zero = " + str(f_a_SWEDEN['p'][0]))
print("\nLIBOR's estimated alpha zero = " + str(f_a_LIBOR['p'][0]))
Thus, using our estimated multifractal spectra, we get that:

NORWAY's estimated alpha zero = 0.4833762164921763

Sweden's estimated alpha zero = 0.5900743202288501

LIBOR's estimated alpha zero = 0.7264880791149612
In [47]:
print("From this, it is easy to estimate the means and variances for the log-normal distribution:")
print("\nUsing: \n \n𝜆 = a / H \n \n and \n \n𝜎^2 = (2(𝜆-1))/ln[b]")

def lambda_mean(H,ALPHA):
    LAMBDA = ALPHA/H
    return LAMBDA

def sigma_variance(LAMBDA, B):
    SIGMA_VARIANCE = (2*(LAMBDA-1))/np.log(B)
    return SIGMA_VARIANCE
From this, it is easy to estimate the means and variances for the log-normal distribution:

Using: 
 
𝜆 = a / H 
 
 and 
 
𝜎^2 = (2(𝜆-1))/ln[b]
In [48]:
print("Assuming we will partition our multifractal cascade in two at each step, meaning b=2:")

b=2

lambda_NORWAY = lambda_mean(H_NORWAY, f_a_NORWAY['p'][0])
lambda_SWEDEN = lambda_mean(H_SWEDEN, f_a_SWEDEN['p'][0])
lambda_LIBOR = lambda_mean(H_LIBOR, f_a_LIBOR['p'][0])

sigma_NORWAY = sigma_variance(lambda_NORWAY, b)
sigma_SWEDEN = sigma_variance(lambda_SWEDEN, b)
sigma_LIBOR = sigma_variance(lambda_LIBOR, b)

print("\nFor NORWAY, we estimate\n  𝜆 = " + str(lambda_NORWAY) + "\n  𝜎^2 = " + str(sigma_NORWAY))
print("\nFor Sweden, we estimate\n  𝜆 = " + str(lambda_SWEDEN) + "\n  𝜎^2 = " + str(sigma_SWEDEN))
print("\nFor LIBOR, we estimate\n  𝜆 = " + str(lambda_LIBOR) + "\n  𝜎^2 = " + str(sigma_LIBOR))
Assuming we will partition our multifractal cascade in two at each step, meaning b=2:

For NORWAY, we estimate
  𝜆 = 1.118009759565887
  𝜎^2 = 0.3405041898044085

For Sweden, we estimate
  𝜆 = 1.089533786175672
  𝜎^2 = 0.2583398986153096

For LIBOR, we estimate
  𝜆 = 1.1261692284680203
  𝜎^2 = 0.3640474404471989
In [49]:
print("Here, we can print our final estimated results.\n")
print("We are now ready to begin constructing our MMAR simulations!\n")

RESULTS = pd.DataFrame([0 for x in range(0,3)])
# RESULTS = pd.DataFrame([[H_NORWAY,H_SWEDEN,H_LIBOR],
#  [f_a_NORWAY['p'][0],f_a_SWEDEN['p'][0], f_a_LIBOR['p'][0]],
#  [lambda_NORWAY, lambda_SWEDEN, lambda_LIBOR]
#  [sigma_NORWAY, sigma_SWEDEN, sigma_LIBOR]])

RESULTS['H'] = pd.DataFrame([H_NORWAY,H_SWEDEN,H_LIBOR])
RESULTS['alpha zero'] = pd.DataFrame([f_a_NORWAY['p'][0],f_a_SWEDEN['p'][0], f_a_LIBOR['p'][0]])
RESULTS['lambda'] = pd.DataFrame([lambda_NORWAY, lambda_SWEDEN, lambda_LIBOR])
RESULTS['sigma^2'] = pd.DataFrame([sigma_NORWAY, sigma_SWEDEN, sigma_LIBOR])

RESULTS.rename(index={RESULTS.index[0]:'Norway'}, inplace=True)
RESULTS.rename(index={RESULTS.index[1]:'Sweden'}, inplace=True)
RESULTS.rename(index={RESULTS.index[2]:'UK LIBOR'}, inplace=True)

RESULTS.drop(labels=0, axis=1, inplace=True)

RESULTS

RESULTS.round(decimals=3)
Here, we can print our final estimated results.

We are now ready to begin constructing our MMAR simulations!

Out[49]:
H alpha zero lambda sigma^2
Norway 0.432 0.483 1.118 0.341
Sweden 0.542 0.590 1.090 0.258
UK LIBOR 0.645 0.726 1.126 0.364
In [50]:
RESULTS
Out[50]:
H alpha zero lambda sigma^2
Norway 0.432354 0.483376 1.118010 0.340504
Sweden 0.541584 0.590074 1.089534 0.258340
UK LIBOR 0.645097 0.726488 1.126169 0.364047
In [127]:
tau_q_ESTIMATED_NORWAY=0

tau_q_ESTIMATED_NORWAY=np.polyfit(q[0:max_q],tau_q_NORWAY[0:max_q],2)

for i in range(0,max_q):
    plt.scatter(q[i], (tau_q_ESTIMATED_NORWAY[0][0]*(q[i]**2) + tau_q_ESTIMATED_NORWAY[1][0]*q[i] + tau_q_ESTIMATED_NORWAY[2][0]), s = 20, marker='x')

plt.plot(q[0:max_q], tau_q_NORWAY[0:max_q], color="red",linewidth=5, alpha = 0.5)
#plt.plot(q,tau_q_ESTIMATED_NORWAY, linewidth=1)

plt.grid(b=True)
plt.title("Estimated tau(q) for Norway")
plt.xlabel("q\n(the 'raw' moment)")
plt.axvline(1/H_NORWAY, color="black", linestyle='--', linewidth=0.9, label = "H = 1 / q\n = " + str(round(H_NORWAY, 3)))
plt.axhline(0, color="black", linestyle='--', linewidth=0.9)
plt.ylabel("tau(q)\n(the 'scaling function')")
plt.legend()
plt.show()

print("\nFor NORWAY, we estimate\n  𝜆 = " + str(lambda_NORWAY) + "\n  𝜎^2 = " + str(sigma_NORWAY))
Using the range of q's from 0.01 to 4.0:
The estimated parameters for tau(q) are: 
[[-0.02119676]
 [ 0.48380015]
 [-1.00559628]]

Thus, the estimated parameters for f(a) are: 
-11.794255562849537, 
11.412125260624025, 
-1.7549976874789843

For NORWAY, we estimate
  𝜆 = 1.118009759565887
  𝜎^2 = 0.3405041898044085
In [128]:
tau_q_ESTIMATED_SWEDEN=0

tau_q_ESTIMATED_SWEDEN=np.polyfit(q[0:max_q],tau_q_SWEDEN[0:max_q],2)

for i in range(0,max_q):
    plt.scatter(q[i], (tau_q_ESTIMATED_SWEDEN[0][0]*(q[i]**2) + tau_q_ESTIMATED_SWEDEN[1][0]*q[i] + tau_q_ESTIMATED_SWEDEN[2][0]), s = 20, marker='x')

plt.plot(q[0:max_q], tau_q_SWEDEN[0:max_q], color="gold",linewidth=5, alpha=0.5)
#plt.plot(q,tau_q_ESTIMATED_NORWAY, linewidth=1)

plt.grid(b=True)
plt.title("Estimated tau(q) for Sweden")
plt.xlabel("q\n(the 'raw' moment)")
plt.axvline(1/H_SWEDEN, color="black", linestyle='--', linewidth=0.9, label = "H = 1 / q\n = " + str(round(H_SWEDEN, 3)))
plt.axhline(0, color="black", linestyle='--', linewidth=0.9)
plt.ylabel("tau(q)\n(the 'scaling function')")
plt.legend()
plt.show()

print("\nFor SWEDEN, we estimate\n  𝜆 = " + str(lambda_SWEDEN) + "\n  𝜎^2 = " + str(sigma_SWEDEN))
Using the range of q's from 0.01 to 4.0:
The estimated parameters for tau(q) are: 
[[-0.02687856]
 [ 0.59061189]
 [-0.99888873]]

Thus, the estimated parameters for f(a) are: 
-9.301094482632855, 
10.98667400844778, 
-2.245541428374209

For SWEDEN, we estimate
  𝜆 = 1.089533786175672
  𝜎^2 = 0.2583398986153096
In [129]:
tau_q_ESTIMATED_LIBOR=0

tau_q_ESTIMATED_LIBOR=np.polyfit(q[0:max_q],tau_q_LIBOR[0:max_q],2)

for i in range(0,max_q):
    plt.scatter(q[i], (tau_q_ESTIMATED_LIBOR[0][0]*(q[i]**2) + tau_q_ESTIMATED_LIBOR[1][0]*q[i] + tau_q_ESTIMATED_LIBOR[2][0]), s = 20, marker='x')

plt.plot(q[0:max_q], tau_q_LIBOR[0:max_q], color="blue",linewidth=5, alpha=0.5)
#plt.plot(q,tau_q_ESTIMATED_NORWAY, linewidth=1)

plt.grid(b=True)
plt.title("Estimated tau(q) for LIBOR")
plt.xlabel("q\n(the 'raw' moment)")
plt.axvline(1/H_LIBOR, color="black", linestyle='--', linewidth=0.9, label = "H = 1 / q\n = " + str(round(H_LIBOR, 3)))
plt.axhline(0, color="black", linestyle='--', linewidth=0.9)
plt.ylabel("tau(q)\n(the 'scaling function')")
plt.legend()
plt.show()


print("\nFor LIBOR, we estimate\n  𝜆 = " + str(lambda_LIBOR) + "\n  𝜎^2 = " + str(sigma_LIBOR))
For LIBOR, we estimate
  𝜆 = 1.1261692284680203
  𝜎^2 = 0.3640474404471989
In [ ]:
 
In [55]:
pip install fbm
The following command must be run outside of the IPython shell:

    $ pip install fbm

The Python package manager (pip) can only be used from outside of IPython.
Please reissue the `pip` command in a separate terminal or command prompt.

See the Python documentation for more information on how to install packages:

    https://docs.python.org/3/installing/
In [5]:
from fbm import FBM
from fbm import fbm, fgn, times
from fbm import MBM
from fbm import mbm, mgn, times

import math
In [127]:
def lognormal_cascade(k, v,ln_lambda, ln_theta):
    
    k = k - 1

    m0 = np.random.lognormal(ln_lambda,ln_theta)
    m1 = np.random.lognormal(ln_lambda,ln_theta)
    M = [m0, m1]
    
    if (k >= 0):
        d=[0 for x in range(0,2)]
        for i in range(0,2):
            d[i] = lognormal_cascade(k, (M[i]*v), ln_lambda, ln_theta)
        
        v = d

    return v
In [129]:
def MMAR(K, simulated_H, simulated_lambda, simulated_sigma, original_price_history, magnitude_parameter, GRAPHS):

    # --- VARIABLES ---
    # K
        # adjust K depending on how many days you want to simulate (e.g. if K=13, you'll simulate 2^13=8192 days)
    # simulated_H
        # the Hurst parameter for the fBm process
    # simulated_lambda
        # the mean for the lognormal cascade
    # simulated_sigma
        # the variance for the lognormal cascade
    # original_price_history
        # the price history of the market under study (used for starting the prices from the right time!)
    # magnitude_parameter
        # adjust this up or down to change the range of price changes (e.g. if prices are swinging too wildly every day, then adjust this downwards)
    # GRAPHS
        # Boolean - either True or False - use True if you want the MMAR to simulate graphs for you
        
    # --- MESSAGE ---
    if GRAPHS == True:
        print("Performing an MMAR simulation with parameters:\n\nH = " + str(simulated_H) + "\nlambda = " + str(simulated_lambda) + "\nsigma = " + str(simulated_sigma) + "\nfBm magnitude = " + str(magnitude_parameter)+ "\n")

    # --- CASCADE ---
    new_cascade = list(np.array(lognormal_cascade(k=K, v=1, ln_lambda = simulated_lambda, ln_theta = simulated_sigma)).flat)
    if GRAPHS == True:
#         plt.figure(figsize=(24,2))
        plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
        plt.title("Binomial Cascade")
        plt.xlabel("Conventional time\n(days)")
        plt.ylabel('"Mass"')
        plt.plot(new_cascade, color="crimson", linewidth=0.5)
        plt.show()


    # --- TRADING TIME ---
    tradingtime = 2**K*np.cumsum(new_cascade)/sum(new_cascade)
    if GRAPHS == True:
        plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
        plt.yticks(np.arange(0, 2**(K)+1, 2**(K-3)))
        plt.title("Trading time\n(normalized)")
        plt.xlabel("Conventional time\n(days)")
        plt.ylabel('"Trading time"\n(\u03B8 (t), normalized)')
        plt.plot(tradingtime, color="orangered")

        
    # --- FBM (Fractional Brownian Motion) ---
    new_fbm_class = FBM(n = 10*2**K+1, hurst = simulated_H, length = magnitude_parameter, method='daviesharte')
    new_fbm_simulation = new_fbm_class.fbm()
    if GRAPHS == True:
        plt.figure(figsize=(24,2))
        plt.xticks(np.arange(0, 10*2**(K)+1, 10*2**(K-3)))
        plt.title("Fractional Brownian Motion")
        plt.xlabel("t")
        plt.ylabel('fBm (t)')
        plt.plot(new_fbm_simulation, color="orange")
        plt.show()

        
    # --- MMAR XT's ---
    simulated_xt_array = [0 for x in range(0, len(tradingtime))]

    for i in range(0, len(tradingtime)):
        simulated_xt_array[i] = new_fbm_simulation[int(tradingtime[i]*10)]

    if GRAPHS == True:
        plt.title("MMAR generated Xt")
        plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
        plt.xlabel("Time\n(days)")
        plt.ylabel('X(t)\n(Natural log growth since beginning)')
        plt.grid(b=True)
        plt.fill_between(np.arange(0, 2**K, 1) , simulated_xt_array, color="darkviolet", alpha=0.2)
        plt.show()


    # --- PRICES ---
    if GRAPHS == True:
        plt.title("MMAR generated Price")
        plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
        plt.xlabel("Time\n(days)")
        plt.ylabel('Price level')
        plt.grid(b=True)
        plt.fill_between(np.arange(0, 2**K, 1) , original_price_history[0]*np.exp(simulated_xt_array), color="limegreen", alpha=0.2)
        plt.show()

        
    # --- LN CHANGES ---
    if GRAPHS == True:
        ln_simulated_xt_array = [0 for x in range(0, len(simulated_xt_array)-1)]

        for i in range(1,len(simulated_xt_array)):
            ln_simulated_xt_array[i-1] = np.log((original_price_history[0]*np.exp(simulated_xt_array[i]))/(original_price_history[0]*np.exp(simulated_xt_array[i-1])))
            
        plt.figure(figsize=(24,5))
        plt.title("Price increments")
        plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
        plt.xlabel("Time\n(days)")
        plt.ylabel('Change\n(%)')
        plt.grid(b=True)
        plt.plot(ln_simulated_xt_array, color="darkviolet", linewidth=0.5)
        plt.gca().set_yticklabels(['{:.0f}'.format(x*100) for x in plt.gca().get_yticks()]) 
        plt.show()
        print("The number of price increments that equal zero is: " + str(list(np.abs(ln_simulated_xt_array)).count(0)))
    
    # --- END ---
    return simulated_xt_array
In [469]:
wonderfullife = MMAR(13, H_NORWAY, lambda_NORWAY, sigma_NORWAY, df.Price_NORWAY, 0.15, True)
Performing an MMAR simulation with parameters:

H = 0.43235420116535195
lambda = 1.118009759565887
sigma = 0.3405041898044085
fBm magnitude = 0.15

The number of price increments that equal zero is: 277
In [132]:
new_wonderfullife = MMAR(13, H_NORWAY, lambda_NORWAY, sigma_NORWAY, df.Price_NORWAY, 0.15, True)
Performing an MMAR simulation with parameters:

H = 0.43235420116535195
lambda = 1.118009759565887
sigma = 0.3405041898044085
fBm magnitude = 0.15

The number of price increments that equal zero is: 324
In [1071]:
plt.figure(figsize=(24,5))
plt.title("Price increments")
plt.xticks(np.arange(0, 2**(13)+1, 2**(13-3)))
axes = plt.gca()
axes.set_ylim([-0.11,0.12])
plt.xlabel("Time\n(days)")
plt.grid(b=True)
plt.ylabel('Change\n(%)')
plt.plot(np.diff(wonderfullife), color="darkviolet", linewidth=0.5)
plt.gca().set_yticklabels(['{:.0f}'.format(x*100) for x in plt.gca().get_yticks()]) 
plt.show()
In [ ]:
 
In [65]:
# A test for creating 10,000 columns or 8,192 rows - i.e. an array with81,920,000 elements
# Apparently, everything works fine! It doesn't even take long :D

# del test_big_array

# test_big_array = [[7.0707 for x in range(0,10000)] for y in range(0,8192)]
# test_big_array = pd.DataFrame(test_big_array)
# test_big_array
In [189]:
print("We are now ready to attempt to simulate financial markets!")
We are now ready to attempt to simulate financial markets!
In [164]:
from IPython.display import clear_output

attempt_length = 10000

attempt=pd.DataFrame([0 for x in range(0,2**13)])

start_time = datetime.now().strftime("%I:%M%p")

magnitude_NORWAY = 0.10

for i in range(0,attempt_length):
    print("Having started at "+str(start_time))
    if i%(attempt_length/attempt_length)==0:
        print("Currently at i=" + str(i))
    if i%101==0:
        attempt[i] = pd.DataFrame(MMAR(13, H_NORWAY, lambda_NORWAY, sigma_NORWAY, df.Price_NORWAY, magnitude_NORWAY, False))
    else:
        attempt[i] = pd.DataFrame(MMAR(13, H_NORWAY, lambda_NORWAY, sigma_NORWAY, df.Price_NORWAY, magnitude_NORWAY, False))
    clear_output()

print("Having started at "+str(start_time))
    
attempt
Currently at i=0
Currently at i=500
Currently at i=1000
Currently at i=1500
Currently at i=2000
Currently at i=2500
Currently at i=3000
Currently at i=3500
Currently at i=4000
Currently at i=4500
Currently at i=5000
Currently at i=5500
Currently at i=6000
Currently at i=6500
Currently at i=7000
Currently at i=7500
Currently at i=8000
Currently at i=8500
Currently at i=9000
Currently at i=9500
Out[164]:
0 1 2 3 4 5 6 7 8 9 ... 9990 9991 9992 9993 9994 9995 9996 9997 9998 9999
0 -0.004104 -0.006654 0.001525 0.000000 -0.004715 -0.008864 0.006536 -0.005804 -0.016103 -0.003998 ... 0.003959 0.004775 -0.004254 0.003441 0.003756 -0.001398 -0.001527 0.000279 0.003527 0.003941
1 -0.022198 -0.025900 0.009532 0.000000 0.008606 -0.003459 0.005158 -0.008877 -0.027041 0.008969 ... -0.002782 0.006613 -0.005506 -0.000278 0.007726 -0.013016 -0.010520 -0.002381 0.014040 0.001534
2 -0.025020 -0.036490 0.008816 0.000000 -0.000942 -0.000004 0.007636 0.003379 -0.016275 0.011586 ... -0.000832 -0.001699 -0.010462 -0.011748 0.015196 -0.011202 -0.015531 -0.010298 0.017012 0.004924
3 -0.023396 -0.045451 0.010044 0.000000 0.012951 -0.006722 0.016394 0.017632 -0.020767 0.008453 ... -0.009705 -0.002461 -0.009354 -0.005419 0.012651 -0.008510 -0.015469 -0.021832 0.022068 0.007025
4 -0.013186 -0.045051 0.001920 0.000000 -0.004496 -0.007221 0.014708 0.024112 -0.026589 0.006008 ... -0.010453 -0.007243 -0.012697 -0.012846 0.010645 -0.001409 -0.017539 -0.045021 0.015881 0.005758
5 -0.003105 -0.032204 -0.009144 0.000000 -0.002871 0.011192 0.011236 0.012127 -0.022196 -0.005009 ... -0.009755 -0.008418 -0.015763 -0.013149 0.012524 -0.001710 -0.017788 -0.037235 0.015315 0.008900
6 0.023251 -0.033903 -0.001213 -0.003696 -0.002260 -0.002293 0.014127 0.018915 -0.027271 -0.001849 ... -0.007028 0.000626 -0.017793 -0.022422 0.017197 -0.006866 -0.021269 -0.039204 0.015398 0.016346
7 0.037643 -0.054027 0.005217 -0.003696 0.010904 -0.006043 0.004754 0.011415 -0.029768 -0.006281 ... -0.015455 0.006143 -0.026331 -0.026984 0.013928 -0.009418 -0.018470 -0.037434 0.022080 0.011488
8 0.047917 -0.044636 0.004150 -0.003696 0.008509 -0.006957 0.006067 -0.014804 -0.023818 0.001450 ... -0.004958 0.012542 -0.002293 -0.031799 0.009309 -0.000930 -0.017941 -0.038147 0.028275 0.012447
9 0.039747 -0.073070 0.007098 -0.004132 0.012334 0.001321 -0.000527 -0.020118 -0.021008 -0.004347 ... -0.010447 0.011426 -0.001950 -0.029395 0.013447 0.001048 -0.015234 -0.036585 0.032838 0.008878
10 0.036488 -0.069502 0.015357 -0.004132 0.004722 -0.008279 -0.000700 -0.020734 -0.012571 0.002521 ... -0.023263 0.016470 -0.005597 -0.011394 0.015872 0.000613 -0.020477 -0.025247 0.034399 0.006006
11 0.040022 -0.049306 0.020027 -0.004132 0.009488 0.007350 -0.001422 -0.033444 -0.020532 0.003686 ... -0.024863 0.018386 -0.001874 -0.028550 0.009912 0.000203 -0.019268 -0.029557 0.032422 0.001488
12 0.057564 -0.050957 0.010832 -0.004132 0.011030 0.001636 0.001387 -0.024704 -0.018291 -0.001507 ... -0.029944 0.014852 0.000808 -0.024369 0.010335 0.016866 -0.019060 -0.036381 0.036898 -0.000888
13 0.041344 -0.060449 0.010749 -0.002746 0.010238 -0.003073 -0.000867 -0.018548 -0.022948 -0.003055 ... -0.041094 0.007327 0.005391 -0.025678 0.014896 0.011803 -0.014662 -0.031881 0.041409 0.000970
14 0.049047 -0.042046 0.013360 -0.002746 0.012642 -0.014486 -0.002060 -0.037322 -0.031582 0.000570 ... -0.039650 0.004954 -0.008773 -0.030164 0.008095 0.018539 -0.014071 -0.031126 0.040731 0.003156
15 0.032266 -0.038830 0.009581 -0.002746 0.019772 -0.022300 0.007067 -0.023849 -0.035889 -0.000722 ... -0.036661 0.010890 0.001141 -0.038528 0.000983 0.015738 -0.015542 -0.037461 0.039450 -0.001025
16 0.015649 -0.042822 0.014394 -0.004290 0.014582 -0.019615 0.003500 -0.025015 -0.036454 0.000819 ... -0.037559 -0.002493 -0.005041 -0.037821 0.003642 0.012984 -0.015542 -0.029255 0.030248 -0.003225
17 0.033891 -0.047499 0.004489 -0.004290 0.024777 -0.018946 0.012672 -0.028089 -0.028358 -0.000943 ... -0.043074 -0.017273 -0.002929 -0.049160 0.012365 0.008293 -0.010842 -0.010665 0.043222 -0.003531
18 0.009392 -0.038990 -0.002850 -0.004290 0.023278 -0.019719 0.013433 -0.037842 -0.034061 -0.005838 ... -0.033438 -0.023554 0.000755 -0.051580 0.016962 0.005514 -0.016928 -0.032100 0.056579 0.003600
19 0.012950 -0.043653 -0.003270 -0.004290 0.058410 -0.014463 0.015427 -0.057700 -0.040050 -0.006410 ... -0.025702 -0.012891 0.000506 -0.044941 0.012014 0.002109 -0.015400 -0.028829 0.050314 0.005994
20 0.013235 -0.035983 0.000884 -0.003415 0.042020 -0.018443 0.019358 -0.051764 -0.040133 -0.001089 ... -0.036574 -0.017307 0.005954 -0.052999 0.010477 -0.003112 -0.014982 -0.037959 0.042214 0.002773
21 0.016947 -0.048786 0.001390 -0.004814 0.046733 -0.015984 0.021688 -0.050844 -0.053080 0.002721 ... -0.082814 -0.019407 -0.007002 -0.056035 0.005650 0.007394 -0.016840 -0.045116 0.056718 -0.000339
22 0.013146 -0.064677 -0.003798 -0.004260 0.045238 -0.019904 0.027154 -0.054740 -0.053300 0.006155 ... -0.073362 -0.018572 0.001246 -0.048991 0.002645 0.000777 -0.025955 -0.042399 0.063264 0.006928
23 0.008393 -0.113891 -0.009065 -0.002678 0.044448 -0.020221 0.018194 -0.043851 -0.048294 0.006423 ... -0.065619 -0.019561 -0.016083 -0.066556 -0.002205 0.000826 -0.027589 -0.043347 0.043727 0.003315
24 0.007972 -0.104086 -0.006973 -0.002678 0.057271 -0.023647 0.017995 -0.038525 -0.051219 0.007449 ... -0.056501 -0.021094 -0.027230 -0.077673 0.006714 0.003718 -0.030085 -0.021800 0.039688 0.000803
25 0.020186 -0.114543 0.000218 -0.002678 0.055467 -0.026858 0.007865 -0.037608 -0.053146 0.004887 ... -0.047574 -0.020374 -0.015290 -0.075466 -0.005591 0.009933 -0.028584 -0.009881 0.041095 0.000631
26 0.020583 -0.087657 -0.001659 -0.000976 0.086420 -0.025427 0.012311 -0.037020 -0.062747 0.006720 ... -0.046184 -0.018968 -0.024599 -0.077490 -0.014400 0.016162 -0.034994 -0.001334 0.035259 0.002173
27 0.037825 -0.117435 -0.003257 -0.000976 0.080673 -0.015140 0.016503 -0.042420 -0.051721 -0.002895 ... -0.056466 -0.018095 -0.017199 -0.073177 -0.012895 0.014297 -0.034994 -0.005584 0.044778 0.003143
28 0.035121 -0.100098 -0.003456 -0.000976 0.090731 -0.015278 0.014714 -0.040357 -0.054455 0.000522 ... -0.056399 -0.009322 -0.008778 -0.079885 -0.011488 0.012832 -0.036269 -0.002201 0.043914 0.001043
29 0.028073 -0.086519 -0.003494 -0.003899 0.087912 -0.017639 0.005627 -0.040114 -0.053994 -0.003158 ... -0.046037 -0.003895 -0.001097 -0.077233 -0.015392 0.011379 -0.036269 -0.010504 0.042247 -0.000457
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
8162 0.168256 -0.525526 0.008908 0.004787 0.034169 -0.407515 0.206325 0.648510 0.004375 -0.362072 ... 0.769078 -0.136253 -0.316993 0.133392 0.187858 0.178085 -0.855671 -0.225228 0.022405 -0.433772
8163 0.178724 -0.519827 0.011627 0.011185 0.051110 -0.407515 0.206990 0.646361 0.006852 -0.355057 ... 0.785888 -0.139483 -0.314441 0.134390 0.196010 0.179228 -0.854774 -0.219655 0.023371 -0.432856
8164 0.146056 -0.519827 0.007200 0.015579 0.054901 -0.404227 0.200928 0.642249 0.006683 -0.346600 ... 0.785304 -0.137007 -0.315743 0.143540 0.204121 0.193442 -0.845287 -0.221833 0.020632 -0.428639
8165 0.156343 -0.521745 0.002186 0.018866 0.058906 -0.404227 0.205466 0.647294 0.008375 -0.344717 ... 0.779935 -0.121524 -0.315598 0.138584 0.198857 0.188928 -0.841805 -0.222994 0.016300 -0.428135
8166 0.147775 -0.520650 0.003204 0.015285 0.041114 -0.406556 0.208046 0.644105 0.008059 -0.342170 ... 0.780688 -0.108259 -0.319202 0.135067 0.197700 0.189837 -0.843247 -0.225846 0.014422 -0.428135
8167 0.143364 -0.520650 0.002176 -0.005926 0.040588 -0.406556 0.213313 0.640349 0.006826 -0.355932 ... 0.783114 -0.108285 -0.320486 0.137017 0.193860 0.195728 -0.846957 -0.213999 0.011121 -0.428428
8168 0.130525 -0.517126 0.002176 0.003527 0.034811 -0.406202 0.199969 0.641593 -0.002509 -0.357843 ... 0.767574 -0.119041 -0.315499 0.139992 0.191885 0.193849 -0.859802 -0.212385 0.014876 -0.425207
8169 0.138104 -0.517343 0.000084 0.005455 0.017461 -0.403743 0.203778 0.646306 0.014441 -0.358357 ... 0.765241 -0.120551 -0.319712 0.131163 0.189951 0.188611 -0.864600 -0.210568 0.010111 -0.423952
8170 0.137035 -0.517160 0.002460 0.000896 0.016895 -0.404415 0.191754 0.645726 0.010497 -0.355865 ... 0.768725 -0.119675 -0.317489 0.135385 0.185559 0.188506 -0.865835 -0.215617 0.016523 -0.421591
8171 0.146094 -0.517902 0.000593 0.012238 0.019757 -0.402384 0.189780 0.645697 0.000675 -0.350979 ... 0.770242 -0.115488 -0.316637 0.139670 0.186910 0.180216 -0.862500 -0.220245 0.018496 -0.426481
8172 0.153200 -0.518602 -0.000973 0.003206 0.021933 -0.402384 0.190599 0.648499 0.003998 -0.355469 ... 0.759750 -0.112920 -0.319748 0.134260 0.182827 0.184299 -0.870190 -0.220120 0.010865 -0.431787
8173 0.136756 -0.526741 -0.006565 0.001634 0.028898 -0.397753 0.185477 0.649753 0.008557 -0.355982 ... 0.771157 -0.123409 -0.320612 0.134905 0.188053 0.184879 -0.874005 -0.227066 0.009789 -0.429404
8174 0.134626 -0.519737 -0.001999 0.002389 0.030704 -0.397753 0.178128 0.647626 0.024198 -0.360062 ... 0.767184 -0.113760 -0.327699 0.138371 0.188935 0.184182 -0.867148 -0.224176 0.007818 -0.428499
8175 0.117264 -0.518154 -0.003895 0.011878 0.040701 -0.394126 0.184511 0.649512 0.018781 -0.361618 ... 0.773109 -0.106255 -0.327116 0.127355 0.196218 0.178810 -0.870213 -0.230163 0.012681 -0.432415
8176 0.111468 -0.515936 -0.004552 0.018347 0.036607 -0.393635 0.184044 0.651170 0.016269 -0.357717 ... 0.768396 -0.099177 -0.329879 0.141710 0.197284 0.171782 -0.865495 -0.221889 0.011678 -0.425027
8177 0.135527 -0.515936 -0.009807 0.024993 0.044175 -0.395918 0.185241 0.645873 0.021464 -0.366905 ... 0.771946 -0.114757 -0.329343 0.139024 0.186585 0.174574 -0.865942 -0.219628 0.012786 -0.420609
8178 0.142388 -0.520681 -0.011999 0.022179 0.037719 -0.388078 0.184595 0.643813 0.018145 -0.364593 ... 0.772120 -0.119590 -0.330869 0.141554 0.197541 0.174377 -0.868191 -0.221117 0.010407 -0.430936
8179 0.137812 -0.520681 -0.020488 0.038301 0.028206 -0.387145 0.184810 0.645920 0.012723 -0.364689 ... 0.770696 -0.120868 -0.332229 0.138078 0.206193 0.172515 -0.862525 -0.224997 0.010065 -0.432770
8180 0.128693 -0.525045 -0.004531 0.026148 0.031211 -0.377350 0.183668 0.646403 0.013558 -0.365030 ... 0.767356 -0.105947 -0.322952 0.143232 0.217442 0.174500 -0.863779 -0.221449 0.002606 -0.433588
8181 0.124546 -0.525045 -0.023396 0.034050 0.031792 -0.378793 0.187399 0.645282 0.012566 -0.366993 ... 0.767070 -0.116148 -0.324282 0.139128 0.219764 0.171344 -0.860284 -0.220919 0.003420 -0.433485
8182 0.133646 -0.524055 -0.017626 0.047484 0.020233 -0.374715 0.174875 0.645807 0.014857 -0.365573 ... 0.766210 -0.122610 -0.320879 0.147983 0.209282 0.172616 -0.865299 -0.217677 0.013919 -0.439057
8183 0.125961 -0.524055 -0.016657 0.053874 0.020602 -0.374435 0.179325 0.645834 0.019308 -0.365573 ... 0.763080 -0.121323 -0.323663 0.145460 0.205108 0.177979 -0.864979 -0.217434 0.014578 -0.434007
8184 0.133776 -0.527965 -0.011001 0.054947 0.013320 -0.376375 0.181495 0.643386 0.026937 -0.367907 ... 0.764590 -0.132770 -0.327168 0.146591 0.195512 0.167672 -0.866521 -0.219563 0.012179 -0.430277
8185 0.117628 -0.523487 -0.016646 0.042792 0.017191 -0.376375 0.180086 0.637654 0.018343 -0.366572 ... 0.765022 -0.114270 -0.325421 0.144131 0.192305 0.162062 -0.872512 -0.224017 0.011547 -0.434081
8186 0.120396 -0.522151 -0.016265 0.036744 0.018359 -0.375011 0.183718 0.636530 0.029484 -0.369022 ... 0.768292 -0.120108 -0.330885 0.141200 0.184491 0.157273 -0.872848 -0.220563 0.020901 -0.430182
8187 0.123080 -0.521766 -0.020072 0.038462 0.029661 -0.375011 0.180244 0.631635 0.033114 -0.364164 ... 0.777411 -0.124697 -0.331073 0.144431 0.184546 0.163075 -0.866073 -0.220483 0.021703 -0.431160
8188 0.124275 -0.523292 -0.020530 0.053960 0.031109 -0.378505 0.183980 0.633953 0.030295 -0.362009 ... 0.777708 -0.118460 -0.334332 0.138553 0.182936 0.171211 -0.864003 -0.216648 0.013244 -0.432972
8189 0.115716 -0.523749 -0.024828 0.029693 0.032078 -0.373487 0.182335 0.636978 0.025690 -0.361475 ... 0.770688 -0.109985 -0.334332 0.129114 0.181180 0.168203 -0.869662 -0.219761 0.012497 -0.431803
8190 0.102281 -0.526198 -0.027101 0.022148 0.021253 -0.381781 0.181143 0.639738 0.021012 -0.359486 ... 0.769742 -0.111601 -0.331660 0.134494 0.175447 0.173779 -0.865829 -0.219883 0.015618 -0.429225
8191 0.110096 -0.522745 -0.029509 0.016162 0.027308 -0.381739 0.177800 0.644849 0.000869 -0.359842 ... 0.781409 -0.111907 -0.329575 0.140563 0.169483 0.173940 -0.869516 -0.222166 0.010783 -0.439802

8192 rows × 10000 columns

In [173]:
# attempt.to_csv('Norway_attempt.csv')
In [154]:
plt.figure(figsize=(24,10))

plt.plot(df.Price_NORWAY, color="black", linewidth=2, label="Real Norwegian price path")

plt.legend()

for i in range(0,len(attempt.columns)):
    K=13
    plt.title(str(attempt_length) + " MMAR generated price paths")
    plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
    plt.xlabel("Time\n(days)")
    plt.ylabel('Price level')
    plt.grid(b=True)
    plt.plot(np.arange(0, 2**K, 1) , df.Price_NORWAY[0]*np.exp(attempt[i]), color="orangered", linewidth=0.1, alpha=0.2)

plt.plot(df.Price_NORWAY, color="black", linewidth=2)

plt.show()
In [1060]:
plt.grid(b=True)
plt.title("Histogram of simulated final prices\nafter 8192 days for Norway")
plt.hist(df.Price_NORWAY[0]*np.exp(attempt.iloc[8191]), bins=40, color="orangered")
plt.xlabel("Final price (USD/NOK)")
plt.ylabel("Frequency")
plt.axvline(df.Price_NORWAY[0]*np.exp(attempt.iloc[8191].quantile(0.05)), linestyle='--', color="purple", label="5th percentile")
plt.axvline(df.Price_NORWAY[0]*np.exp(attempt.iloc[8191].quantile(0.25)), linestyle='--', color="blue", label="25th percentile")
plt.axvline(df.Price_NORWAY[0]*np.exp(attempt.iloc[8191].quantile(0.5)), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(df.Price_NORWAY[0]*np.exp(attempt.iloc[8191].quantile(0.75)), linestyle='--', color="gold", label="75th percentile")
plt.axvline(df.Price_NORWAY[0]*np.exp(attempt.iloc[8191].quantile(0.95)), linestyle='--', color="red", label="95th percentile")
plt.axvline(df.Price_NORWAY.iloc[-1], linestyle='--', color="black", label="real final price", linewidth=0.7)
plt.legend()
plt.show()
In [176]:
# Here we calculate the LN differences

diff_attempt = [[0 for x in range(0,attempt_length)] for y in range (0,8192)]
for i in range(0,attempt_length):
    if i(attempt_length/attempt_length) == 0:
        print("i="+str(i))
    for j in range (1,8192):
        diff_attempt[j][i]=attempt[i][j] - attempt[i][j-1]
    clear_output()
    
diff_attempt = pd.DataFrame(diff_attempt)

diff_attempt.to_csv('Norway_diff_attempt.csv')
i=0
i=500
i=1000
i=1500
i=2000
i=2500
i=3000
i=3500
i=4000
i=4500
i=5000
i=5500
i=6000
i=6500
i=7000
i=7500
i=8000
i=8500
i=9000
i=9500
In [209]:
# Here we show the original graph as a reminder

plt.figure(figsize=(24,5))
plt.title("Price increments for Norway")
plt.xticks(np.arange(0, 2**(13)+1, 2**(13-3)))

plt.xlabel("Time\n(days)")
plt.grid(b=True)
plt.ylabel('Change\n(%)')

axes = plt.gca()
axes.set_ylim([-0.10,0.10])

plt.plot(df.LN_NORWAY, color="red", linewidth=0.5)
plt.show()
In [210]:
# Here we show the first twenty graphs of price changes

for i in range(0,20):
    plt.figure(figsize=(24,5))
    plt.title("Price increments for simulation " + str(i+1))
    plt.xticks(np.arange(0, 2**(13)+1, 2**(13-3)))

    plt.xlabel("Time\n(days)")
    plt.grid(b=True)
    plt.ylabel('Change\n(%)')
    
    axes = plt.gca()
    axes.set_ylim([-0.10,0.10])
    
    plt.plot(diff_attempt[i], color="darkviolet", linewidth=0.5)
    plt.show()
In [179]:
print("Here we can see some statistics about the price changes in the Norwegian exchange rate.\n")

df.LN_NORWAY.describe()
Here we can see some statistics about the price changes in the Norwegian exchange rate.

Out[179]:
count    7560.000000
mean        0.000033
std         0.007228
min        -0.064440
25%        -0.004051
50%         0.000000
75%         0.003942
max         0.059688
Name: LN_NORWAY, dtype: float64
In [196]:
df.LN_NORWAY.std()
Out[196]:
0.007228071748173812
In [1046]:
plt.grid(b=True)
plt.title("Mean price change for \n" + str(attempt_length) + " MMAR generated price paths\nfor NORWAY")
plt.hist(diff_attempt.mean(), bins=30, color='orangered')
plt.axvline(df.LN_NORWAY.mean(), linestyle='--', color="black", label="real mean\n= "+str(round(df.LN_NORWAY.mean(), 6)))
plt.axvline(0, linestyle='--', color="forestgreen", label="mean = zero")
plt.ylabel("Frequency per " + str(attempt_length))
plt.xlabel("Mean")
plt.legend()
plt.show()
print((diff_attempt.mean()).describe())

plt.grid(b=True)
plt.title("Standard deviation of price changes for \n" + str(attempt_length) + " MMAR generated price paths\nfor NORWAY")
plt.hist(diff_attempt.std(), bins=30, color='orangered')
plt.axvline(df.LN_NORWAY.std(), linestyle='--', color="black", label="real standard deviation\n= "+str(round(df.LN_NORWAY.std(), 5)))
plt.ylabel("Frequency per " + str(attempt_length))
plt.xlabel("Standard Deviation")
plt.legend()
plt.show()


print("\n\nBelow is a description of the standard deviations of our 10,000 simulations.\n")
(diff_attempt.std()).describe()
count    1.000000e+04
mean     4.783070e-07
std      4.479205e-05
min     -1.606284e-04
25%     -2.944317e-05
50%      5.766159e-07
75%      3.035007e-05
max      1.811756e-04
dtype: float64

Below is a description of the standard deviations of our 10,000 simulations.

Out[1046]:
count    10000.000000
mean         0.007215
std          0.000106
min          0.006693
25%          0.007148
50%          0.007218
75%          0.007286
max          0.007604
dtype: float64
In [181]:
print("The mean is obviously very variable - we can simulate both downward and upward markets.\n\nWe can see that the standard deviation is also quite variable, but it is mostly near to what we expect.")
The mean is obviously very variable - we can simulate both downward and upward markets.

We can see that the standard deviation is also quite variable, but it is mostly near to what we expect.
In [182]:
print("Here's the kurtosis for the real data for Norway.")

stats.kurtosis(df.LN_NORWAY[1:-1])
Here's the kurtosis for the real data for Norway.
Out[182]:
4.279929706945225
In [1181]:
plt.figure(figsize=(16,4))
plt.grid(b=True)
plt.title("Kurtosis of price changes for \n" + str(attempt_length) + " MMAR generated price paths\nfor Norway")
plt.hist(stats.kurtosis(diff_attempt), bins=100, color='orangered')
plt.axvline(stats.kurtosis(df.LN_NORWAY[1:-1]), linestyle='--', color="black", label="real kurtosis\n= "+str(round(stats.kurtosis(df.LN_NORWAY[1:-1]), 3)))
plt.axvline(0, linestyle='--', color="aqua", label="Gaussian kurtosis = 0")

kurtosis_array_diff_attempt = stats.kurtosis(diff_attempt)
plt.axvline(np.quantile(kurtosis_array_diff_attempt, 0.05), linestyle='--', color="purple", label="5th percentile")
plt.axvline(np.quantile(kurtosis_array_diff_attempt,0.25), linestyle='--', color="blue", label="25th percentile")
plt.axvline(np.quantile(kurtosis_array_diff_attempt,0.5), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(np.quantile(kurtosis_array_diff_attempt,0.75), linestyle='--', color="gold", label="75th percentile")
plt.axvline(np.quantile(kurtosis_array_diff_attempt,0.95), linestyle='--', color="red", label="95th percentile")

plt.ylabel("Frequency per " + str(attempt_length))
plt.xlabel("Kurtosis")
plt.legend()
plt.show()
In [184]:
print("But at least the kurtosis is about right!")
But at least the kurtosis is about right!
In [208]:
print("Below is the mean kurtosis of the price changes for every simulation.\nKeep in mind that it is overweighted by the heavy right tail.\n")

stats.kurtosis(diff_attempt).mean()
Below is the mean kurtosis of the price changes for every simulation.
Keep in mind that it is overweighted by the heavy right tail.

Out[208]:
5.15394832632845
In [701]:
print("The median kurtosis is about right. The real kurtosis was "+str(stats.kurtosis(df.LN_NORWAY[1:-1]))+".\n")

np.median(stats.kurtosis(diff_attempt))
The median kurtosis is about right. The real kurtosis was 4.279929706945225.

Out[701]:
4.616610555074015
In [827]:
# Here we test how well the simulations fit the original data, using the Kolmogorov-Smirnov test to test the log price changes

ks_test_NORWAY = [0 for x in range(0, attempt_length)]
for i in range(0,attempt_length):
    ks_test_NORWAY[i] = stats.ks_2samp(diff_attempt[i], df.LN_NORWAY).pvalue
    

plt.grid(b=True)
plt.hist(ks_test_NORWAY, bins=20, color='orangered')
plt.title("Histogram of Kolmogorov-Smirnov P-values\nfor " + str(attempt_length) + " MMAR simulations for Norway ")
plt.axvline(0.05, linestyle='--', color="black", label="5% significance")
plt.xlabel("P-Value")
plt.ylabel("Frequency per " + str(attempt_length))
plt.legend()
plt.show()

plt.grid(b=True)
plt.plot(sorted(ks_test_NORWAY), color='orangered')
plt.title("Sorted Kolmogorov-Smirnov P-values\nfor " + str(attempt_length) + " MMAR simulations for Norway ")
plt.axhline(0.05, linestyle='--', color="black", label="5% significance")
plt.axvline(sum(x<0.05 for x in ks_test_NORWAY), linestyle='--', color="purple", label="Total rejections = "+str(sum(x<0.05 for x in ks_test_NORWAY)))
plt.xlabel("Index ouf of " + str(attempt_length))
plt.ylabel("P-Value")
plt.legend()
plt.show()

print("Count of P<0.05 = "+ str(sum(x<0.05 for x in ks_test_NORWAY)) + " failures out of " + str(attempt_length) + ".")
Count of P<0.05 = 7372 failures out of 10000.
In [188]:
print("We can see that most simulations fail the Kolmogorov-Smirnov test, which is a shame.\n")
print("That said, the test may be overly sensitive.\n")
print("But unfortunately, the authors don't provide any better way to test if an MMAR simulation fits the real data.")
We can see that most simulations fail the Kolmogorov-Smirnov test, which is a shame.

That said, the test may be overly sensitive.

But unfortunately, the authors don't provide any better way to test if an MMAR simulation fits the real data.
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [842]:
print("Now we encounter a problem: we see that the mean return for an MMAR simulation is 0.\n")
print("For stocks of rich countries, we don't expect the stock index to go down over 30 years,\nso we can't leave our price increments with an expectation of zero.")
print("\nWe must therefore introduce an upward bias into our price movement simulation.")
print("\nHere, we will multiply our simulated Xt by e to the power of the mean daily return over 30 years")
print("(in our case, the mean daily return was " + str(round(df.LN_SWEDEN.mean(), 6)*100) + "% per day.)  This will bias the price to move upwards.")
print("\nBut we also don't want our prices to move in a way that's too extreme, so we will restrict our prices\nfrom ever exceeding a barrier that's 4x higher than the real price at the end of thirty years.")
print("This is admittedly an arbitray number, but it is comparable to the Nikkei 225 index, \nwhich rose to nearly 40,000 in the 80's and was still only around 10,000 over twenty years later. ")
Now we encounter a problem: we see that the mean return for an MMAR simulation is 0.

For stocks of rich countries, we don't expect the stock index to go down over 30 years,
so we can't leave our price increments with an expectation of zero.

We must therefore introduce an upward bias into our price movement simulation.

Here, we will multiply our simulated Xt by e to the power of the mean daily return over 30 years
(in our case, the mean daily return was 0.0325% per day.)  This will bias the price to move upwards.

But we also don't want our prices to move in a way that's too extreme, so we will restrict our prices
from ever exceeding a barrier that's 4x higher than the real price at the end of thirty years.
This is admittedly an arbitray number, but it is comparable to the Nikkei 225 index, 
which rose to nearly 40,000 in the 80's and was still only around 10,000 over twenty years later. 
In [839]:
def MMAR_STOCKS(K, simulated_H, simulated_lambda, simulated_sigma, original_price_history, magnitude_parameter, GRAPHS, FBMCLASS, FBM_TOP_LIMIT, FBM_BOTTOM_LIMIT, MEAN_RETURN):

    # --- VARIABLES ---
    # K
        # adjust K depending on how many days you want to simulate (e.g. if K=13, you'll simulate 2^13=8192 days)
    # simulated_H
        # the Hurst parameter for the fBm process
    # simulated_lambda
        # the mean for the lognormal cascade
    # simulated_sigma
        # the variance for the lognormal cascade
    # original_price_history
        # the price history of the market under study (used for starting the prices from the right time!)
    # magnitude_parameter
        # adjust this up or down to change the range of price changes (e.g. if prices are swinging too wildly every day, then adjust this downwards)
    # GRAPHS
        # Boolean - either True or False - use True if you want the MMAR to simulate graphs for you
        
    # --- MESSAGE ---
    if GRAPHS == True:
        print("Performing an MMAR simulation with parameters:\n\nH = " + str(simulated_H) + "\nlambda = " + str(simulated_lambda) + "\nsigma = " + str(simulated_sigma) + "\nfBm magnitude = " + str(magnitude_parameter)+ "\n")

    # --- CASCADE ---
    new_cascade = list(np.array(lognormal_cascade(k=K, v=1, ln_lambda = simulated_lambda, ln_theta = simulated_sigma)).flat)
    if GRAPHS == True:
        plt.figure(figsize=(24,2))
        plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
        plt.title("Binomial Cascade")
        plt.xlabel("Conventional time\n(days)")
        plt.ylabel('"Mass"')
        plt.plot(new_cascade, color="crimson", linewidth=0.5)
        plt.show()


    # --- TRADING TIME ---
    tradingtime = 2**K*np.cumsum(new_cascade)/sum(new_cascade)
    if GRAPHS == True:
        plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
        plt.yticks(np.arange(0, 2**(K)+1, 2**(K-3)))
        plt.title("Trading time\n(normalized)")
        plt.xlabel("Conventional time\n(days)")
        plt.ylabel('"Trading time"\n(\u03B8 (t), normalized)')
        plt.plot(tradingtime, color="orangered")

        
    # --- FBM (Fractional Brownian Motion) ---
        
        # Here we are adding the complicating factor. It is a recursive method to keep simulating fBm's until we get one that gives us a realistic price movement.
    def fbm_attempt(fbmclass, fbm_top_limit, fbm_bottom_limit, mean_return, original_prices):
        # --- FBM (Fractional Brownian Motion) ---
        simulated_fbm = fbmclass.fbm()
        
        xt_top_limit = np.log(fbm_top_limit/original_prices[0])
        xt_bottom_limit = np.log(fbm_bottom_limit/original_prices[0])
        
        # --- MMAR XT's (temporary)---
        TEMPORARY_simulated_xt_array = [simulated_fbm[int(tradingtime[x]*10)]+x*mean_return for x in range(0, len(tradingtime))] # Here, since it's stocks, we also add a multiple of the mean return for every Xt value
        
        if all(XT < xt_top_limit for XT in TEMPORARY_simulated_xt_array) and all(XT > xt_bottom_limit for XT in TEMPORARY_simulated_xt_array):
            return TEMPORARY_simulated_xt_array
            # --- Graphs for FBM ---
            if GRAPHS == True:
                plt.figure(figsize=(24,2))
                plt.xticks(np.arange(0, 10*2**(K)+1, 10*2**(K-3)))
                plt.title("Fractional Brownian Motion")
                plt.xlabel("t")
                plt.ylabel('fBm (t)')
                plt.plot(simulated_fbm, color="orange")
                plt.show()   
        else:
            return fbm_attempt(fbmclass, fbm_top_limit, fbm_bottom_limit, mean_return, original_prices)
        

        
    # --- MMAR XT's ---
    simulated_xt_array = fbm_attempt(FBMCLASS, FBM_TOP_LIMIT, FBM_BOTTOM_LIMIT, MEAN_RETURN, original_price_history)

        
    # --- Graphs for MMAR XT's ---
    if GRAPHS == True:
        plt.title("MMAR generated Xt")
        plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
        plt.xlabel("Time\n(days)")
        plt.ylabel('X(t)\n(Natural log growth since beginning)')
        plt.grid(b=True)
        plt.fill_between(np.arange(0, 2**K, 1) , simulated_xt_array, color="darkviolet", alpha=0.2)
        plt.show()


    # --- PRICES ---
    if GRAPHS == True:
        plt.title("MMAR generated Price")
        plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
        plt.xlabel("Time\n(days)")
        plt.ylabel('Price level')
        plt.grid(b=True)
        plt.fill_between(np.arange(0, 2**K, 1) , original_price_history[0]*np.exp(simulated_xt_array), color="limegreen", alpha=0.2)
        plt.show()

        
    # --- LN CHANGES ---
    if GRAPHS == True:
        ln_simulated_xt_array = [0 for x in range(0, len(simulated_xt_array)-1)]

        for i in range(1,len(simulated_xt_array)):
            ln_simulated_xt_array[i-1] = np.log((original_price_history[0]*np.exp(simulated_xt_array[i]))/(original_price_history[0]*np.exp(simulated_xt_array[i-1])))
            
        plt.figure(figsize=(24,5))
        plt.title("Price increments")
        plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
        plt.xlabel("Time\n(days)")
        plt.ylabel('Change\n(%)')
        plt.grid(b=True)
        plt.plot(ln_simulated_xt_array, color="darkviolet", linewidth=0.5)
        plt.gca().set_yticklabels(['{:.0f}'.format(x*100) for x in plt.gca().get_yticks()]) 
        plt.show()
        print("The number of price increments that equal zero is: " + str(list(np.abs(ln_simulated_xt_array)).count(0)))
    
    # --- END ---
    return simulated_xt_array
In [992]:
from IPython.display import clear_output

attempt2_length = 10000

attempt2=pd.DataFrame([0 for x in range(0,2**13)])

magnitude_SWEDEN = 3.24

FBM_class_SWEDEN = FBM(n = 10*2**13+1, hurst = H_SWEDEN, length = magnitude_SWEDEN, method='daviesharte')

start_time = datetime.now().strftime("%I:%M%p")

for i in range(0,attempt2_length):
    print("Having started at "+str(start_time))
    if i%(attempt2_length/attempt2_length)==0:
        print("Currently at i=" + str(i) + " out of " + str(attempt2_length))
    if i%101==0:
        attempt2[i] = pd.DataFrame(MMAR_STOCKS(13, H_SWEDEN, lambda_SWEDEN, sigma_SWEDEN, df.Price_SWEDEN, magnitude_SWEDEN, GRAPHS=False, FBMCLASS=FBM_class_SWEDEN, FBM_TOP_LIMIT=5000, FBM_BOTTOM_LIMIT=100, MEAN_RETURN=df.LN_SWEDEN.mean()))
    else:
        attempt2[i] = pd.DataFrame(MMAR_STOCKS(13, H_SWEDEN, lambda_SWEDEN, sigma_SWEDEN, df.Price_SWEDEN, magnitude_SWEDEN, GRAPHS=False, FBMCLASS=FBM_class_SWEDEN, FBM_TOP_LIMIT=5000, FBM_BOTTOM_LIMIT=100, MEAN_RETURN=df.LN_SWEDEN.mean()))
    clear_output()

print("Having started at "+str(start_time))    

attempt2
Having started at 12:18AM
Out[992]:
0 1 2 3 4 5 6 7 8 9 ... 9990 9991 9992 9993 9994 9995 9996 9997 9998 9999
0 -0.000114 -0.010153 0.003611 0.001478 0.011841 -0.005647 -0.000759 -0.003813 -0.004202 0.001683 ... -0.007713 -0.003467 0.003005 0.003985 -0.003611 0.025521 0.005750 -0.014332 0.000000 0.009590
1 -0.006433 -0.021725 -0.004825 -0.001211 0.042306 0.000416 -0.001253 -0.005082 -0.004139 -0.026172 ... 0.004267 0.000183 0.007322 -0.013315 -0.002035 0.077039 -0.001172 -0.006648 -0.002434 0.031067
2 -0.005867 -0.061715 0.012571 -0.004235 0.039881 0.010986 0.012344 -0.020796 -0.007927 0.015522 ... 0.014386 0.004856 0.002355 -0.018269 -0.001256 0.075331 0.004830 -0.000117 -0.000693 0.036581
3 0.006629 -0.064578 0.015985 -0.011700 0.030554 0.027397 0.012463 0.029331 -0.014908 0.010012 ... 0.014677 -0.004465 0.010697 -0.016811 -0.002516 0.083669 0.023073 0.019088 0.000768 0.019659
4 0.007299 -0.072208 0.040801 -0.014336 0.041244 0.029613 0.005077 0.024217 -0.025806 -0.024877 ... 0.015258 0.013071 -0.003955 -0.011071 -0.003931 0.107055 0.024820 0.005737 0.006641 0.031951
5 0.013131 -0.086297 0.043331 -0.022132 0.057341 0.040690 0.002386 0.024073 -0.035087 0.027438 ... 0.019596 0.022616 0.002423 -0.018878 0.008880 0.145018 0.023769 -0.004472 0.003932 0.045130
6 -0.010529 -0.107857 0.045957 -0.034163 0.056488 0.052777 0.003704 0.029102 -0.021285 -0.021492 ... 0.016133 0.027634 0.009732 -0.013354 0.006537 0.171146 0.023908 0.004586 0.004562 0.039547
7 -0.020800 -0.090267 0.019827 -0.027238 0.059914 0.067842 0.003236 0.030168 -0.011211 -0.017712 ... 0.018002 0.025476 0.016758 -0.019448 0.008160 0.202189 0.016016 -0.007714 0.010556 0.022348
8 -0.015481 -0.089378 0.015915 -0.034872 0.082771 0.063484 0.002057 0.035751 -0.020315 0.029913 ... 0.021053 0.028696 0.005578 -0.028867 -0.001949 0.220458 0.028275 0.009699 0.009985 0.000718
9 -0.014246 -0.095031 0.012379 -0.025076 0.087614 0.058759 0.001975 0.038873 -0.039111 0.003446 ... 0.021378 0.026333 -0.000875 -0.025029 -0.007440 0.230813 0.021282 -0.008361 0.002160 0.006682
10 -0.013268 -0.109511 0.007443 -0.032811 0.089575 0.065257 0.008865 0.043690 -0.033287 0.011571 ... 0.019075 0.012837 -0.011196 -0.040463 -0.014677 0.234974 0.036954 -0.007446 0.005807 0.007174
11 0.010229 -0.110526 0.003430 -0.039003 0.083371 0.060186 0.010578 0.038661 -0.018530 0.049736 ... 0.033485 0.032854 -0.004787 -0.051007 -0.037525 0.216977 0.024964 -0.004671 0.000460 0.005810
12 0.012837 -0.084151 0.009824 -0.044015 0.087555 0.040026 -0.003638 0.040448 -0.028980 0.027214 ... 0.033586 0.047489 -0.015944 -0.050100 -0.022153 0.228361 0.027085 0.004350 0.002424 0.008373
13 0.011444 -0.089935 -0.002240 -0.044722 0.110483 0.053726 -0.013645 0.044989 -0.034224 0.022543 ... 0.032743 0.062761 -0.006624 -0.056527 -0.016890 0.231997 0.002269 0.022179 0.001562 -0.007143
14 0.011514 -0.093573 0.009403 -0.037808 0.086345 0.048662 0.007823 0.050872 -0.035106 0.050931 ... 0.026628 0.083947 -0.003698 -0.056544 -0.036226 0.226508 0.003032 0.018878 -0.000532 -0.003567
15 0.039095 -0.098208 0.003429 -0.033662 0.088354 0.042404 0.011405 0.061760 -0.036439 0.065611 ... 0.027089 0.076580 0.003931 -0.049879 -0.030776 0.241118 0.002710 0.053672 -0.000208 0.008827
16 0.063990 -0.111537 0.029997 -0.038930 0.081097 0.044280 0.008137 0.063433 -0.048126 0.041546 ... 0.034591 0.066750 0.034293 -0.049526 -0.050348 0.241858 0.013666 -0.005948 -0.007015 -0.004283
17 0.070768 -0.104410 0.028998 -0.037814 0.086877 0.026164 0.007057 0.056079 -0.038428 0.055402 ... 0.037560 0.057276 0.016389 -0.072136 -0.048494 0.217834 0.016488 -0.051179 0.004217 0.003252
18 0.056380 -0.084001 0.027397 -0.034504 0.073958 0.029465 0.027320 0.040154 -0.055583 0.065026 ... 0.037549 0.057725 0.012187 -0.078367 -0.056435 0.224440 0.009907 -0.042561 0.006318 0.004652
19 0.081024 -0.059100 0.036519 -0.028960 0.087271 0.031367 0.030811 0.022319 -0.049646 0.120056 ... 0.039646 0.071784 -0.002052 -0.088341 -0.055057 0.213905 -0.004053 -0.037578 0.014598 -0.007660
20 0.056182 -0.084093 0.014069 -0.026895 0.076659 0.030073 0.045181 0.016329 -0.067725 0.126174 ... 0.039546 0.076344 -0.019533 -0.092962 -0.052602 0.198689 -0.027572 -0.048209 0.009040 -0.009071
21 0.053571 -0.095846 0.008291 -0.019471 0.064259 0.029108 0.047012 0.017283 -0.056489 0.126154 ... 0.032555 0.073427 -0.021706 -0.086878 -0.046857 0.199777 -0.021215 -0.053520 0.005238 0.021200
22 0.071558 -0.136084 0.003490 -0.022550 0.063700 0.037731 0.062349 0.004351 -0.079431 0.110822 ... 0.031236 0.067542 -0.007202 -0.101361 -0.056760 0.202978 -0.048809 -0.034986 -0.011544 0.023832
23 0.065691 -0.169783 0.021695 -0.025001 0.046910 0.049084 0.069208 0.006993 -0.091945 0.112213 ... 0.028893 0.069325 -0.009712 -0.097956 -0.058715 0.190551 -0.040809 -0.026483 -0.006829 0.026214
24 0.066177 -0.182643 0.049372 -0.033324 0.070658 0.053316 0.065514 0.012374 -0.105840 0.168460 ... 0.026790 0.073149 -0.025643 -0.102207 -0.064452 0.205839 -0.033754 -0.027324 -0.010504 0.038943
25 0.061041 -0.153934 0.042748 -0.026867 0.064484 0.061130 0.070767 0.011963 -0.095513 0.168112 ... 0.030376 0.074368 -0.014989 -0.105261 -0.063696 0.221231 -0.013850 -0.027705 -0.018088 0.054007
26 0.059612 -0.147700 0.055357 -0.030931 0.049014 0.062369 0.069619 0.011679 -0.096829 0.190309 ... 0.031398 0.077948 -0.012657 -0.098528 -0.071728 0.218666 -0.012952 -0.033224 -0.025119 0.070818
27 0.053773 -0.151961 0.056535 -0.029005 0.055771 0.063346 0.063279 0.011348 -0.104837 0.192501 ... 0.026239 0.077480 -0.015772 -0.086714 -0.077326 0.230315 -0.006864 -0.019553 -0.021930 0.061497
28 0.054041 -0.149891 0.044918 -0.026554 0.074692 0.067632 0.067863 0.020604 -0.108125 0.195252 ... 0.040385 0.065313 -0.033414 -0.098097 -0.073666 0.233832 0.007704 -0.046022 -0.027752 0.046779
29 0.045164 -0.153427 0.054241 -0.023989 0.059949 0.064562 0.064689 0.023266 -0.122297 0.178473 ... 0.057019 0.072646 0.002187 -0.093837 -0.076394 0.217080 0.004019 -0.034245 -0.026299 0.041439
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
8162 1.717853 1.192392 2.882902 3.487899 2.109535 1.201107 2.563708 0.953477 2.554737 3.300755 ... 1.913715 1.527236 0.350737 0.552813 2.689049 1.798818 2.911223 1.410518 1.165984 3.032950
8163 1.711796 1.206214 2.889794 3.481079 2.111740 1.212856 2.561494 0.959786 2.568657 3.315678 ... 1.906122 1.522690 0.349340 0.555016 2.680031 1.795097 2.910537 1.413609 1.172086 3.031985
8164 1.710422 1.228228 2.901699 3.488114 2.104864 1.208556 2.554030 0.968415 2.569323 3.325615 ... 1.911499 1.528705 0.345074 0.547417 2.681925 1.801617 2.905445 1.417821 1.188809 3.037457
8165 1.702279 1.219394 2.913121 3.551436 2.102725 1.192386 2.556594 0.969420 2.562961 3.330051 ... 1.913700 1.541383 0.338618 0.558958 2.679983 1.792281 2.903387 1.432172 1.201886 3.066869
8166 1.712132 1.240980 2.914177 3.574870 2.103347 1.202170 2.558606 0.954960 2.572567 3.320658 ... 1.920451 1.545030 0.348626 0.549721 2.677791 1.774960 2.887896 1.424324 1.211158 3.082875
8167 1.715575 1.246015 2.922378 3.555269 2.124555 1.206891 2.552830 0.957601 2.564455 3.330066 ... 1.934984 1.558858 0.347697 0.522370 2.697821 1.771091 2.887212 1.440093 1.200611 3.069152
8168 1.725370 1.223677 2.914399 3.506073 2.128136 1.212767 2.548262 0.966069 2.558911 3.315751 ... 1.945898 1.586382 0.340267 0.514223 2.684146 1.779817 2.882771 1.453740 1.194332 3.086539
8169 1.713941 1.235778 2.918214 3.525916 2.116128 1.199348 2.546043 0.969912 2.562973 3.318256 ... 1.943451 1.608107 0.341561 0.542635 2.680924 1.774335 2.886735 1.459200 1.192826 3.079857
8170 1.709359 1.231865 2.927803 3.500011 2.105553 1.205279 2.551869 0.971512 2.582064 3.316661 ... 1.951932 1.611712 0.340492 0.540113 2.670172 1.782200 2.897099 1.447463 1.185987 3.086660
8171 1.719491 1.246945 2.903392 3.489351 2.119124 1.226202 2.558098 0.972641 2.568132 3.324711 ... 1.962463 1.594240 0.349264 0.520148 2.689700 1.776032 2.894050 1.445222 1.178964 3.101300
8172 1.740012 1.245959 2.906777 3.477651 2.135719 1.277097 2.562772 0.978577 2.542310 3.331852 ... 1.960345 1.589973 0.349589 0.518387 2.698189 1.777516 2.911667 1.425513 1.177687 3.103136
8173 1.729807 1.219643 2.899775 3.491529 2.130079 1.236544 2.562560 0.982587 2.546596 3.323456 ... 1.956064 1.620721 0.348766 0.508056 2.704800 1.770333 2.919606 1.434275 1.183039 3.102602
8174 1.718933 1.210745 2.910750 3.517480 2.126082 1.218600 2.559475 0.981593 2.514313 3.309524 ... 1.961968 1.599284 0.351070 0.527563 2.704164 1.776280 2.920417 1.451165 1.176969 3.104095
8175 1.717808 1.213102 2.913250 3.519298 2.117994 1.199787 2.558143 0.979351 2.503429 3.295121 ... 1.953680 1.599271 0.355773 0.531499 2.708695 1.752487 2.916830 1.434924 1.176916 3.104908
8176 1.722743 1.183871 2.923897 3.506094 2.113593 1.158445 2.553613 0.980906 2.492097 3.298006 ... 1.957311 1.600642 0.358552 0.529156 2.708258 1.747904 2.943186 1.431195 1.187323 3.113977
8177 1.723803 1.190271 2.938394 3.544203 2.104305 1.169919 2.558802 0.981909 2.491405 3.302360 ... 1.968490 1.598855 0.345620 0.530107 2.704321 1.758953 2.971511 1.433173 1.184866 3.130856
8178 1.724537 1.194596 2.947361 3.502391 2.093831 1.238416 2.562108 0.988973 2.501342 3.275150 ... 1.968321 1.566871 0.343685 0.525241 2.685276 1.772432 2.971388 1.423096 1.175002 3.103260
8179 1.712358 1.202508 2.949024 3.504085 2.096500 1.251252 2.557319 0.979305 2.513997 3.260590 ... 1.971395 1.565503 0.357834 0.535269 2.703361 1.778279 2.973840 1.433276 1.175030 3.116624
8180 1.716451 1.208552 2.954360 3.469367 2.078315 1.244856 2.543937 0.976672 2.511233 3.234488 ... 1.969109 1.550801 0.367796 0.524914 2.710957 1.783495 2.959393 1.428194 1.177457 3.127203
8181 1.713504 1.237760 2.978823 3.449671 2.084450 1.195694 2.548064 0.970573 2.530166 3.233329 ... 1.966067 1.534260 0.379385 0.525076 2.719187 1.777532 2.970024 1.409519 1.175973 3.142977
8182 1.719402 1.247090 2.988152 3.462148 2.081863 1.200637 2.551011 0.962000 2.516750 3.247157 ... 1.962246 1.528417 0.379790 0.525244 2.707564 1.772166 2.956730 1.424852 1.188000 3.131215
8183 1.711624 1.265673 2.987509 3.426577 2.067946 1.198160 2.551546 0.965494 2.511379 3.249944 ... 1.964355 1.531765 0.375063 0.528530 2.698897 1.770220 2.979214 1.427085 1.184026 3.101037
8184 1.730760 1.233695 2.981490 3.465847 2.050306 1.235005 2.563649 0.983028 2.559243 3.228629 ... 1.973355 1.525892 0.361800 0.527919 2.698382 1.772279 2.972717 1.413199 1.189200 3.119174
8185 1.721821 1.255970 2.983413 3.462609 2.084170 1.224361 2.568950 0.984889 2.554961 3.211362 ... 1.970601 1.535004 0.353183 0.540752 2.686391 1.776645 2.972119 1.412435 1.183045 3.102896
8186 1.727650 1.252505 2.985093 3.465114 2.094524 1.270519 2.575626 0.986018 2.574561 3.201158 ... 1.968077 1.541476 0.355205 0.530367 2.681521 1.781989 2.971376 1.411923 1.177402 3.102765
8187 1.725162 1.239423 2.981158 3.467504 2.094408 1.304094 2.569792 1.007958 2.524640 3.208437 ... 1.963459 1.537971 0.375753 0.532783 2.676530 1.782059 2.973926 1.428345 1.178798 3.098710
8188 1.744157 1.271165 2.962132 3.477721 2.100682 1.333937 2.585999 1.020706 2.523170 3.221898 ... 1.944990 1.525900 0.370573 0.531437 2.662314 1.785053 2.978468 1.435901 1.189362 3.106353
8189 1.745921 1.272211 2.964836 3.493604 2.111946 1.311877 2.585820 1.019604 2.506918 3.183591 ... 1.943190 1.530085 0.360403 0.525353 2.663892 1.781260 2.971473 1.435197 1.192302 3.107395
8190 1.750290 1.269579 2.958444 3.486699 2.111596 1.310843 2.585202 1.006867 2.520136 3.200074 ... 1.946026 1.528536 0.357842 0.523150 2.658298 1.764851 2.966405 1.447472 1.194578 3.123202
8191 1.747766 1.267008 2.948793 3.485121 2.099267 1.286276 2.589647 0.991657 2.522432 3.199299 ... 1.937353 1.534243 0.362311 0.517297 2.656752 1.746493 2.958554 1.442740 1.168839 3.152387

8192 rows × 10000 columns

In [993]:
attempt2.to_csv('Sweden_attempt.csv')
In [155]:
plt.figure(figsize=(24,10))

plt.plot(df.Price_SWEDEN, color="black", linewidth=2, label="Real Swedish price path")

plt.legend()

for i in range(0,len(attempt2.columns)):
    K=13
    plt.title(str(attempt2_length) + " MMAR generated price paths")
    plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
    plt.xlabel("Time\n(days)")
    plt.ylabel('Price level')
    plt.grid(b=True)
    plt.plot(np.arange(0, 2**K, 1) , df.Price_SWEDEN[0]*np.exp(attempt2[i]), color="goldenrod", linewidth=0.1, alpha=0.2)
    
plt.plot(df.Price_SWEDEN, color="black", linewidth=2)
    
plt.show()
In [1052]:
plt.grid(b=True)
plt.title("Histogram of simulated final prices\nafter 8192 days for Sweden")
plt.hist(df.Price_SWEDEN[0]*np.exp(attempt2.iloc[8191]), bins=40, color="goldenrod")
plt.xlabel("Final price (SEK)")
plt.ylabel("Frequency")
plt.axvline(df.Price_SWEDEN[0]*np.exp(attempt2.iloc[8191].quantile(0.05)), linestyle='--', color="purple", label="5th percentile")
plt.axvline(df.Price_SWEDEN[0]*np.exp(attempt2.iloc[8191].quantile(0.25)), linestyle='--', color="blue", label="25th percentile")
plt.axvline(df.Price_SWEDEN[0]*np.exp(attempt2.iloc[8191].quantile(0.5)), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(df.Price_SWEDEN[0]*np.exp(attempt2.iloc[8191].quantile(0.75)), linestyle='--', color="gold", label="75th percentile")
plt.axvline(df.Price_SWEDEN[0]*np.exp(attempt2.iloc[8191].quantile(0.95)), linestyle='--', color="red", label="95th percentile")
plt.axvline(df.Price_SWEDEN.iloc[-1], linestyle='--', color="black", label="real final price")

plt.legend()
plt.show()
In [996]:
# Here we calculate the LN differences

diff_attempt2 = [[0 for x in range(0,attempt2_length)] for y in range (0,8192)]
for i in range(0,attempt2_length):
    
    if i%(attempt2_length/attempt2_length) == 0:
        print("i="+str(i))
    for j in range (1,8192):
        diff_attempt2[j][i]=attempt2[i][j] - attempt2[i][j-1]
    clear_output()
    
diff_attempt2= pd.DataFrame(diff_attempt2)

diff_attempt2.to_csv('Sweden_diff_attempt.csv')
In [997]:
# Here we show the original graph as a reminder

plt.figure(figsize=(24,5))
plt.title("Price increments for Sweden")
plt.xticks(np.arange(0, 2**(13)+1, 2**(13-3)))

plt.xlabel("Time\n(days)")
plt.grid(b=True)
plt.ylabel('Change\n(%)')

axes = plt.gca()
axes.set_ylim([-0.175,0.175])

plt.plot(df.LN_SWEDEN, color="gold", linewidth=0.5)
plt.show()
In [998]:
# Here we show the first twenty graphs of price changes

for i in range(0,20):
    plt.figure(figsize=(24,5))
    plt.title("Price increments for simulation " + str(i+1))
    plt.xticks(np.arange(0, 2**(13)+1, 2**(13-3)))

    plt.xlabel("Time\n(days)")
    plt.grid(b=True)
    plt.ylabel('Change\n(%)')
    
    axes = plt.gca()
    axes.set_ylim([-0.175,0.175])
    
    plt.plot(diff_attempt2[i], color="orange", linewidth=0.5)
    plt.show()
In [999]:
print("Here we can see some statistics about the price changes in the Swedish stock market.\n")

df.LN_SWEDEN.describe()
Here we can see some statistics about the price changes in the Swedish stock market.

Out[999]:
count    7560.000000
mean        0.000325
std         0.014584
min        -0.088003
25%        -0.007073
50%         0.000705
75%         0.007922
max         0.110216
Name: LN_SWEDEN, dtype: float64
In [1000]:
df.LN_SWEDEN.std()
Out[1000]:
0.014583857213811736
In [1036]:
plt.grid(b=True)
plt.title("Mean price change for \n" + str(attempt2_length) + " MMAR generated price paths\nfor SWEDEN")
plt.hist(diff_attempt2.mean(), bins=30, color='goldenrod')
plt.axvline(df.LN_SWEDEN.mean(), linestyle='--', color="black", label="real mean\n= "+str(round(df.LN_SWEDEN.mean(), 6)))
plt.axvline(0, linestyle='--', color="forestgreen", label="mean = zero")
plt.ylabel("Frequency per " + str(attempt2_length))
plt.xlabel("Mean")
plt.legend()
plt.show()
print((diff_attempt2.mean()).describe())

plt.grid(b=True)
plt.title("Standard deviation of price changes for \n" + str(attempt2_length) + " MMAR generated price paths\nfor SWEDEN")
plt.hist(diff_attempt2.std(), bins=30, color='goldenrod')
plt.axvline(df.LN_SWEDEN.std(), linestyle='--', color="black", label="real standard deviation\n= "+str(round(df.LN_SWEDEN.std(), 4)))
plt.ylabel("Frequency per " + str(attempt2_length))
plt.xlabel("Standard Deviation")
plt.legend()
plt.show()


print("\n\nBelow is a description of the standard deviations of our " + str(attempt2_length) + " simulations.\n")
(diff_attempt2.std()).describe()
count    10000.000000
mean         0.000264
std          0.000102
min         -0.000031
25%          0.000193
50%          0.000275
75%          0.000347
max          0.000455
dtype: float64

Below is a description of the standard deviations of our 10000 simulations.

Out[1036]:
count    10000.000000
mean         0.014604
std          0.000185
min          0.013831
25%          0.014475
50%          0.014601
75%          0.014723
max          0.015437
dtype: float64
In [1002]:
print("The mean is obviously very variable - we can simulate both downward and upward markets.\n\nWe can see that the standard deviation is also quite variable, but it is mostly near to what we expect.")
The mean is obviously very variable - we can simulate both downward and upward markets.

We can see that the standard deviation is also quite variable, but it is mostly near to what we expect.
In [1003]:
print("Here's the kurtosis for the real data for Sweden.")

stats.kurtosis(df.LN_SWEDEN[1:-1])
Here's the kurtosis for the real data for Sweden.
Out[1003]:
4.362479840016163
In [1180]:
plt.figure(figsize=(16,4))
plt.grid(b=True)
plt.title("Kurtosis of price changes for \n" + str(attempt2_length) + " MMAR generated price paths\nfor Sweden")
plt.hist(stats.kurtosis(diff_attempt2), bins=100, color='goldenrod')
plt.axvline(stats.kurtosis(df.LN_SWEDEN[1:-1]), linestyle='--', color="black", label="real kurtosis\n= "+str(round(stats.kurtosis(df.LN_SWEDEN[1:-1]), 3)))
plt.axvline(0, linestyle='--', color="aqua", label="Gaussian kurtosis = 0")

kurtosis_array_diff_attempt2 = stats.kurtosis(diff_attempt2)
plt.axvline(np.quantile(kurtosis_array_diff_attempt2, 0.05), linestyle='--', color="purple", label="5th percentile")
plt.axvline(np.quantile(kurtosis_array_diff_attempt2,0.25), linestyle='--', color="blue", label="25th percentile")
plt.axvline(np.quantile(kurtosis_array_diff_attempt2,0.5), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(np.quantile(kurtosis_array_diff_attempt2,0.75), linestyle='--', color="gold", label="75th percentile")
plt.axvline(np.quantile(kurtosis_array_diff_attempt2,0.95), linestyle='--', color="red", label="95th percentile")

plt.ylabel("Frequency per " + str(attempt2_length))
plt.xlabel("Kurtosis")
plt.legend()
plt.show()
In [1005]:
print("But at least the kurtosis is about right!")
But at least the kurtosis is about right!
In [1006]:
print("Below is the mean kurtosis of the price changes for every simulation.\nKeep in mind that it is overweighted by the heavy right tail.\n")

stats.kurtosis(diff_attempt2).mean()
Below is the mean kurtosis of the price changes for every simulation.
Keep in mind that it is overweighted by the heavy right tail.

Out[1006]:
4.434461371314952
In [1007]:
print("The median kurtosis is a little less than the real, which was "+str(stats.kurtosis(df.LN_SWEDEN[1:-1]))+".\n")

np.median(stats.kurtosis(diff_attempt2))
The median kurtosis is a little less than the real, which was 4.362479840016163.

Out[1007]:
4.053667250500096
In [1008]:
# Here we test how well the simulations fit the original data, using the Kolmogorov-Smirnov test to test the log price changes

ks_test_SWEDEN = [0 for x in range(0, attempt2_length)]
for i in range(0,attempt2_length):
    ks_test_SWEDEN[i] = stats.ks_2samp(diff_attempt2[i], df.LN_SWEDEN).pvalue
    

plt.grid(b=True)
plt.hist(ks_test_SWEDEN, bins=20, color='goldenrod')
plt.title("Histogram of Kolmogorov-Smirnov P-values\nfor " + str(attempt2_length) + " MMAR simulations for Sweden ")
plt.axvline(0.05, linestyle='--', color="black", label="5% significance")
plt.xlabel("P-Value")
plt.ylabel("Frequency per " + str(attempt2_length))
plt.legend()
plt.show()

plt.grid(b=True)
plt.plot(sorted(ks_test_SWEDEN), color='goldenrod')
plt.title("Sorted Kolmogorov-Smirnov P-values\nfor " + str(attempt2_length) + " MMAR simulations for Sweden ")
plt.axhline(0.05, linestyle='--', color="black", label="5% significance")
plt.axvline(sum(x<0.05 for x in ks_test_SWEDEN), linestyle='--', color="orange", label="Total rejections = "+str(sum(x<0.05 for x in ks_test_SWEDEN)))
plt.xlabel("Index ouf of " + str(attempt2_length))
plt.ylabel("P-Value")
plt.legend()
plt.show()

print("Count of P<0.05 = "+ str(sum(x<0.05 for x in ks_test_SWEDEN)) + " failures out of " + str(attempt2_length) + ".")
Count of P<0.05 = 5587 failures out of 10000.
In [1009]:
print("We can see that most simulations fail the Kolmogorov-Smirnov test, which is a shame.\n")
print("That said, the test may be overly sensitive.\n")
print("But unfortunately, the authors don't provide any better way to test if an MMAR simulation fits the real data.")
We can see that most simulations fail the Kolmogorov-Smirnov test, which is a shame.

That said, the test may be overly sensitive.

But unfortunately, the authors don't provide any better way to test if an MMAR simulation fits the real data.
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [1010]:
print("In the same way that we expect stocks to go downwards, we may expect lending rates to move in a certain direction.")
print("\nHere, we reuse our modified MMAR for stocks to simulate bond yields going down over thirty years.")
print("In the same way as for stocks, we can modify our Xt's by adding the mean change from the real yield changes over thirty years.")
In the same way that we expect stocks to go downwards, we may expect lending rates to move in a certain direction.

Here, we reuse our modified MMAR for stocks to simulate bond yields going down over thirty years.
In the same way as for stocks, we can modify our Xt's by adding the mean change from the real yield changes over thirty years.
In [1011]:
from IPython.display import clear_output

attempt3_length = 10000

attempt3=pd.DataFrame([0 for x in range(0,2**13)])

magnitude_LIBOR = 4

FBM_class_LIBOR = FBM(n = 10*2**13+1, hurst = H_LIBOR, length = magnitude_LIBOR, method='daviesharte')

start_time = datetime.now().strftime("%I:%M%p")

for i in range(0,attempt3_length):
    print("Having started at "+str(start_time))
    if i%(attempt3_length/attempt3_length)==0:
        print("Currently at i=" + str(i) + " out of " + str(attempt3_length))
    if i%101==0:
        attempt3[i] = pd.DataFrame(MMAR_STOCKS(13, H_LIBOR, lambda_LIBOR, sigma_LIBOR, df.Price_LIBOR, magnitude_LIBOR, GRAPHS=False, FBMCLASS=FBM_class_LIBOR, FBM_TOP_LIMIT=20, FBM_BOTTOM_LIMIT=0.1, MEAN_RETURN=df.LN_LIBOR.mean()))
    else:
        attempt3[i] = pd.DataFrame(MMAR_STOCKS(13, H_LIBOR, lambda_LIBOR, sigma_LIBOR, df.Price_LIBOR, magnitude_LIBOR, GRAPHS=False, FBMCLASS=FBM_class_LIBOR, FBM_TOP_LIMIT=20, FBM_BOTTOM_LIMIT=0.1, MEAN_RETURN=df.LN_LIBOR.mean()))
    clear_output()

print("Having started at "+str(start_time))    

attempt3
Having started at 06:35PM
Out[1011]:
0 1 2 3 4 5 6 7 8 9 ... 9990 9991 9992 9993 9994 9995 9996 9997 9998 9999
0 -0.006085 0.000198 0.017357 0.001478 -0.002785 0.000360 0.005255 0.007278 -0.011759 -0.005731 ... -0.001646 0.003740 -0.002696 0.002113 0.003772 0.005266 -0.001307 0.001294 0.013085 0.016313
1 -0.002982 -0.000142 0.017528 -0.001177 -0.005290 0.002091 0.010725 0.002889 0.002684 -0.009315 ... -0.004020 0.006519 -0.005387 0.001990 0.009146 0.003628 0.004121 0.003907 0.023379 0.019145
2 -0.008014 -0.001295 0.013652 0.005270 -0.007156 -0.000314 0.025287 -0.038624 -0.017861 -0.002336 ... -0.009327 0.013212 -0.004621 0.005357 0.008749 0.003046 0.003245 0.003585 0.029982 0.024835
3 -0.038169 -0.004733 0.018479 0.003118 -0.009217 -0.002852 0.026084 -0.033595 -0.025102 0.007250 ... -0.013536 0.013169 -0.007317 0.008292 0.016158 0.004490 0.001510 0.006512 0.031848 0.047945
4 -0.033958 -0.008491 0.011054 0.007997 -0.008656 -0.013280 0.018455 -0.030605 -0.032139 0.006317 ... -0.015315 0.017649 -0.006817 0.012819 0.012077 0.007684 0.009573 0.009570 0.031892 0.052807
5 -0.012915 -0.012069 0.012467 0.025437 -0.012226 -0.006728 0.016727 -0.031441 -0.012569 0.001049 ... -0.021378 0.021866 -0.006902 0.017603 0.015384 0.008636 0.004987 0.006398 0.019711 0.062439
6 -0.024787 -0.015252 0.025659 0.025772 -0.013149 -0.005882 0.013150 -0.028933 -0.025970 -0.006799 ... -0.024208 0.024949 -0.007731 0.025996 0.010614 0.000135 0.002740 0.003816 0.019886 0.070993
7 -0.048935 -0.011893 0.019847 0.027193 -0.012518 -0.007316 0.007749 -0.030123 -0.046800 -0.002244 ... -0.022711 0.028847 -0.008054 0.018746 0.005597 -0.004151 -0.006049 0.001460 0.024899 0.065013
8 -0.050991 -0.009866 0.021681 0.031184 -0.019307 -0.003163 0.004984 -0.025983 -0.052472 0.002679 ... -0.034619 0.033988 -0.012907 0.015889 0.006437 -0.023883 -0.004080 0.003023 0.020087 0.063729
9 -0.060674 -0.008193 0.024982 0.035191 -0.022566 -0.003486 0.005875 -0.025247 -0.058169 0.006950 ... -0.033770 0.030775 -0.015469 0.016945 0.014676 -0.022718 -0.005746 0.002700 0.021592 0.064158
10 -0.042743 -0.007118 0.024390 0.035212 -0.025234 -0.003549 0.005239 -0.030143 -0.067494 0.028062 ... -0.041671 0.035335 -0.019065 0.015553 0.012110 -0.027679 -0.010533 0.005440 0.016239 0.079459
11 -0.053251 -0.008825 0.020783 0.033671 -0.034223 -0.007016 0.012400 -0.024634 -0.071664 0.035179 ... -0.049161 0.033067 -0.018466 0.010911 0.010987 -0.037018 -0.011449 0.006721 0.010580 0.082345
12 -0.050854 -0.007968 0.006629 0.033616 -0.039120 -0.004577 0.010693 -0.022024 -0.088282 0.036166 ... -0.049854 0.030881 -0.018343 0.011568 0.010033 -0.031158 -0.019231 0.006399 0.008186 0.086983
13 -0.051928 -0.009294 -0.000079 0.031812 -0.044412 -0.003855 0.009247 -0.028300 -0.085586 0.041620 ... -0.057364 0.023420 -0.019561 0.013638 0.012909 -0.037199 -0.015044 0.006076 0.001982 0.093592
14 -0.039068 -0.008974 -0.005263 0.032127 -0.043062 -0.000983 0.000838 -0.026204 -0.088287 0.041216 ... -0.059159 0.018151 -0.016811 0.013316 0.014383 -0.032682 -0.025058 0.008651 0.003451 0.096722
15 -0.030998 -0.005313 -0.004174 0.028334 -0.043595 0.004367 -0.002430 -0.030710 -0.093622 0.043916 ... -0.063899 0.015904 -0.015754 0.014533 0.014873 -0.037913 -0.025377 0.008328 0.000999 0.094825
16 -0.026946 -0.007551 -0.008687 0.022370 -0.043544 0.007807 0.001141 -0.029136 -0.106764 0.035681 ... -0.065267 0.016050 -0.017646 0.027803 0.015185 -0.031874 -0.030668 0.007907 -0.001526 0.094393
17 -0.038559 -0.003487 0.008921 0.021104 -0.046049 0.009262 0.002811 -0.024405 -0.109184 0.035849 ... -0.061787 0.015513 -0.020672 0.023648 0.016897 -0.035170 -0.033419 0.006448 0.014225 0.096818
18 -0.027150 -0.001254 0.007900 0.025213 -0.045466 0.010299 -0.002394 -0.034355 -0.098957 0.038524 ... -0.063026 0.020849 -0.020569 0.031244 0.021499 -0.040400 -0.043494 0.003225 0.026123 0.091875
19 -0.031648 0.002425 0.002387 0.031040 -0.045378 0.009976 -0.008561 -0.040666 -0.105634 0.041744 ... -0.064725 0.021035 -0.020892 0.036360 0.021204 -0.049015 -0.046444 0.000642 0.012939 0.077644
20 -0.027822 0.002009 -0.030069 0.031468 -0.041077 0.009831 -0.010194 -0.068038 -0.107901 0.050768 ... -0.063675 0.020841 -0.017794 0.074349 0.022015 -0.040666 -0.046979 0.000320 0.014459 0.077212
21 -0.041896 0.001209 -0.028347 0.027795 -0.041764 0.009800 -0.016567 -0.080420 -0.109293 0.061750 ... -0.063627 0.022295 -0.016050 0.080154 0.019916 -0.038945 -0.052179 0.000117 0.031634 0.080052
22 -0.047691 0.002992 -0.023923 0.027572 -0.041818 0.007008 -0.011413 -0.083711 -0.103687 0.063888 ... -0.065594 0.021973 -0.014172 0.078920 0.018518 -0.035500 -0.041715 -0.000205 0.032717 0.074541
23 -0.058212 0.014684 -0.011350 0.024090 -0.039869 0.006335 -0.009884 -0.077586 -0.099494 0.061247 ... -0.066404 0.024983 -0.013803 0.080009 0.017091 -0.038049 -0.056236 -0.000528 0.030009 0.072108
24 -0.054768 0.030137 0.018628 0.024907 -0.037736 0.006012 -0.004579 -0.080179 -0.089591 0.055686 ... -0.067261 0.025063 -0.012222 0.089924 0.019015 -0.041799 -0.058644 -0.000915 0.037342 0.061820
25 -0.116420 0.027581 0.006367 0.025728 -0.038800 0.007569 -0.002724 -0.081283 -0.085641 0.062635 ... -0.068708 0.026216 -0.014040 0.097577 0.018890 -0.036275 -0.065515 -0.001237 0.036530 0.058758
26 -0.141510 0.037324 0.004827 0.025878 -0.038670 0.005861 -0.015755 -0.090550 -0.084792 0.061813 ... -0.068971 0.025442 -0.015307 0.095949 0.013713 -0.031875 -0.075388 -0.000741 0.031028 0.056487
27 -0.175374 0.034549 0.001732 0.025773 -0.038406 0.004489 -0.012575 -0.081482 -0.081050 0.070334 ... -0.067147 0.023194 -0.014566 0.084844 0.019799 -0.033366 -0.073512 -0.001063 0.025933 0.056364
28 -0.192620 0.037176 0.006420 0.025888 -0.040876 -0.001761 -0.013316 -0.079146 -0.062542 0.071168 ... -0.070899 0.023706 -0.015420 0.079611 0.021224 -0.032248 -0.066311 -0.001386 0.023611 0.051349
29 -0.202649 0.043661 0.042195 0.025259 -0.036275 -0.006713 -0.012841 -0.073301 -0.059157 0.083932 ... -0.069693 0.025975 -0.008005 0.080606 0.021024 -0.034030 -0.066337 0.000782 0.024982 0.051670
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
8162 -4.039655 -3.262541 -2.543495 -3.579142 -2.451109 -2.085882 -1.023863 -1.818310 -1.984821 -1.325462 ... -3.596933 -2.989247 -3.077168 -4.413944 -0.896948 -1.026187 -1.569831 -2.805014 -1.097672 -3.637143
8163 -4.041964 -3.262313 -2.543445 -3.603053 -2.460664 -2.086204 -1.019572 -1.818283 -1.985449 -1.325109 ... -3.597256 -2.985097 -3.079922 -4.408881 -0.902909 -1.028343 -1.571669 -2.807100 -1.099112 -3.630438
8164 -4.040177 -3.268338 -2.543784 -3.603937 -2.485588 -2.084593 -1.000164 -1.817281 -1.998708 -1.325305 ... -3.595270 -2.984310 -3.086686 -4.408544 -0.906042 -1.024750 -1.570922 -2.807423 -1.090703 -3.630313
8165 -4.040214 -3.269279 -2.546950 -3.614501 -2.496374 -2.082813 -0.999412 -1.817835 -2.005437 -1.326194 ... -3.595592 -2.983798 -3.087641 -4.410519 -0.906365 -1.024619 -1.566142 -2.808690 -1.091156 -3.627087
8166 -4.039094 -3.275981 -2.551188 -3.655893 -2.500624 -2.078741 -0.996755 -1.819625 -2.007282 -1.325938 ... -3.595915 -2.985211 -3.087775 -4.410952 -0.904279 -1.025449 -1.566152 -2.809013 -1.064708 -3.627410
8167 -4.046347 -3.279828 -2.550974 -3.649065 -2.500842 -2.077564 -1.005327 -1.823546 -2.003068 -1.327062 ... -3.594821 -2.985856 -3.088557 -4.410683 -0.904038 -1.025771 -1.563332 -2.808043 -1.059173 -3.627219
8168 -4.042406 -3.280151 -2.551620 -3.630620 -2.502830 -2.076773 -0.990324 -1.824747 -1.995946 -1.326208 ... -3.595144 -2.980103 -3.088483 -4.411510 -0.904784 -1.021532 -1.558426 -2.808366 -1.055767 -3.627541
8169 -4.043441 -3.280544 -2.550424 -3.640921 -2.505975 -2.077720 -0.983414 -1.831480 -1.993715 -1.324177 ... -3.592380 -2.983685 -3.089906 -4.410940 -0.905069 -1.021872 -1.550067 -2.806440 -1.040273 -3.627228
8170 -4.040828 -3.283824 -2.551740 -3.644403 -2.507444 -2.075879 -0.979037 -1.834458 -1.988445 -1.323044 ... -3.592703 -2.981030 -3.095035 -4.408349 -0.905014 -1.023391 -1.554312 -2.805094 -1.020360 -3.627254
8171 -4.042086 -3.280114 -2.551068 -3.645249 -2.506667 -2.075120 -0.985068 -1.833852 -1.988155 -1.323028 ... -3.593285 -2.989028 -3.096194 -4.408907 -0.906235 -1.032250 -1.551869 -2.805416 -1.025805 -3.626085
8172 -4.046149 -3.278303 -2.553247 -3.638299 -2.509235 -2.072489 -0.978817 -1.833261 -1.987769 -1.321446 ... -3.593608 -2.992170 -3.097093 -4.408174 -0.908433 -1.033239 -1.555263 -2.806720 -0.997958 -3.626408
8173 -4.053344 -3.277355 -2.553018 -3.651886 -2.510738 -2.071130 -0.977999 -1.832361 -1.998412 -1.321271 ... -3.594085 -2.995416 -3.097823 -4.406404 -0.907850 -1.032590 -1.554218 -2.807555 -0.995026 -3.625543
8174 -4.060477 -3.277625 -2.556305 -3.652781 -2.512429 -2.072274 -0.969142 -1.832674 -1.999220 -1.320810 ... -3.594407 -2.989697 -3.099673 -4.407667 -0.904423 -1.032913 -1.554805 -2.806558 -0.979794 -3.622476
8175 -4.069521 -3.278310 -2.561975 -3.655623 -2.518800 -2.072596 -0.968407 -1.835276 -1.998430 -1.324458 ... -3.593997 -2.986378 -3.100383 -4.409462 -0.904101 -1.034737 -1.559349 -2.809547 -0.995132 -3.621243
8176 -4.075820 -3.275472 -2.564033 -3.660850 -2.528753 -2.070811 -0.977114 -1.843122 -2.005863 -1.323885 ... -3.594319 -2.987345 -3.105721 -4.406390 -0.903600 -1.029191 -1.556690 -2.809870 -1.012401 -3.623137
8177 -4.082685 -3.274543 -2.566620 -3.662211 -2.529994 -2.074567 -0.977409 -1.846087 -2.016299 -1.323870 ... -3.593140 -2.985923 -3.107394 -4.407005 -0.900139 -1.029798 -1.558801 -2.810192 -1.032909 -3.621984
8178 -4.081561 -3.282030 -2.568176 -3.661526 -2.528407 -2.073918 -0.972692 -1.849540 -2.006500 -1.321007 ... -3.593463 -2.988178 -3.108162 -4.407327 -0.900188 -1.033154 -1.558590 -2.812213 -1.021579 -3.621082
8179 -4.083172 -3.278553 -2.569275 -3.655368 -2.538851 -2.074240 -0.975081 -1.855763 -2.006396 -1.321330 ... -3.593785 -2.984804 -3.108470 -4.408452 -0.898194 -1.034231 -1.559577 -2.814945 -1.007048 -3.620558
8180 -4.081280 -3.275279 -2.571570 -3.642082 -2.540078 -2.074780 -0.987284 -1.850042 -2.009626 -1.319854 ... -3.591660 -2.988490 -3.106288 -4.404139 -0.898517 -1.031521 -1.558701 -2.817682 -1.035016 -3.621710
8181 -4.081115 -3.284285 -2.572440 -3.645279 -2.544048 -2.076335 -0.960530 -1.851650 -2.007986 -1.318582 ... -3.591983 -2.983119 -3.103303 -4.401559 -0.899973 -1.029087 -1.566788 -2.820138 -1.112674 -3.623266
8182 -4.080066 -3.284748 -2.570975 -3.639314 -2.550634 -2.080990 -0.957117 -1.852204 -2.008232 -1.318905 ... -3.587296 -2.980758 -3.102841 -4.401748 -0.897990 -1.030578 -1.574095 -2.816382 -1.130508 -3.621518
8183 -4.081422 -3.280448 -2.571760 -3.652032 -2.563006 -2.081312 -0.955048 -1.859162 -2.009180 -1.319227 ... -3.587618 -2.975526 -3.101622 -4.401086 -0.897130 -1.024493 -1.573268 -2.814085 -1.148454 -3.619093
8184 -4.079946 -3.276980 -2.573870 -3.661320 -2.569430 -2.082486 -0.956619 -1.864775 -2.011269 -1.321810 ... -3.587941 -2.973781 -3.102038 -4.400946 -0.898093 -1.026545 -1.571517 -2.813182 -1.150472 -3.614468
8185 -4.078773 -3.274230 -2.571021 -3.664440 -2.572273 -2.084149 -0.963941 -1.863527 -2.015704 -1.325706 ... -3.588642 -2.973234 -3.103580 -4.390829 -0.899333 -1.032551 -1.568842 -2.811513 -1.159149 -3.612883
8186 -4.076953 -3.273508 -2.569027 -3.670219 -2.589215 -2.084471 -0.961909 -1.858262 -2.013937 -1.325809 ... -3.588964 -2.972999 -3.103332 -4.386943 -0.897703 -1.035865 -1.568871 -2.810417 -1.178983 -3.614519
8187 -4.079952 -3.273681 -2.569392 -3.685922 -2.597071 -2.084794 -0.962781 -1.860871 -2.014497 -1.328483 ... -3.585830 -2.975710 -3.103655 -4.373993 -0.894713 -1.031477 -1.575901 -2.809402 -1.193668 -3.615425
8188 -4.083685 -3.282857 -2.568852 -3.687587 -2.597726 -2.088583 -0.964161 -1.861798 -2.017523 -1.330628 ... -3.586153 -2.976508 -3.108204 -4.371117 -0.895036 -1.034551 -1.564062 -2.811875 -1.218369 -3.615747
8189 -4.086043 -3.286014 -2.574218 -3.680616 -2.602448 -2.090513 -0.971539 -1.867198 -2.018454 -1.331662 ... -3.584296 -2.978787 -3.110444 -4.369275 -0.895127 -1.035727 -1.575279 -2.811314 -1.236759 -3.618295
8190 -4.088344 -3.283813 -2.577646 -3.684031 -2.612144 -2.090835 -0.974601 -1.884968 -2.021678 -1.333631 ... -3.584619 -2.974569 -3.112712 -4.365070 -0.895301 -1.032822 -1.585689 -2.812606 -1.229517 -3.618325
8191 -4.092212 -3.275677 -2.587378 -3.684831 -2.614976 -2.090245 -0.973129 -1.921791 -2.040862 -1.334614 ... -3.584967 -2.975998 -3.122740 -4.365183 -0.898272 -1.037080 -1.581957 -2.813283 -1.223731 -3.619335

8192 rows × 10000 columns

In [1012]:
attempt3.to_csv('LIBOR_attempt.csv')
In [149]:
plt.figure(figsize=(24,10))

plt.plot(df.Price_LIBOR, color="black", linewidth=2, label="Real LIBOR price path")

plt.legend()

for i in range(0,len(attempt3.columns)):
    K=13
    plt.title(str(attempt3_length) + " MMAR generated price paths")
    plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
    plt.xlabel("Time\n(days)")
    plt.ylabel('Price level')
    plt.grid(b=True)
    plt.plot(np.arange(0, 2**K, 1) , df.Price_LIBOR[0]*np.exp(attempt3[i]), color="cornflowerblue", linewidth=0.1, alpha=0.2)
    
plt.plot(df.Price_LIBOR, color="black", linewidth=2)
    
plt.show()
In [1055]:
plt.grid(b=True)
plt.title("Histogram of simulated final yields\nafter 8192 days for LIBOR")
plt.hist(df.Price_LIBOR[0]*np.exp(attempt3.iloc[8191]), bins=40, color="cornflowerblue")
plt.xlabel("Final yield (%)")
plt.ylabel("Frequency")
plt.axvline(df.Price_LIBOR[0]*np.exp(attempt3.iloc[8191].quantile(0.05)), linestyle='--', color="purple", label="5th percentile")
plt.axvline(df.Price_LIBOR[0]*np.exp(attempt3.iloc[8191].quantile(0.25)), linestyle='--', color="blue", label="25th percentile")
plt.axvline(df.Price_LIBOR[0]*np.exp(attempt3.iloc[8191].quantile(0.5)), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(df.Price_LIBOR[0]*np.exp(attempt3.iloc[8191].quantile(0.75)), linestyle='--', color="gold", label="75th percentile")
plt.axvline(df.Price_LIBOR[0]*np.exp(attempt3.iloc[8191].quantile(0.95)), linestyle='--', color="red", label="95th percentile")
plt.axvline(df.Price_LIBOR.iloc[-1], linestyle='--', color="black", label="real final price")
plt.legend()
plt.show()
In [1014]:
# Here we calculate the LN differences

diff_attempt3 = [[0 for x in range(0,attempt3_length)] for y in range (0,8192)]
for i in range(0,attempt3_length):
    if i%(attempt3_length/attempt3_length) == 0:
        print("i="+str(i))
    for j in range (1,8192):
        diff_attempt3[j][i]=attempt3[i][j] - attempt3[i][j-1]
    clear_output()
    
    
diff_attempt3= pd.DataFrame(diff_attempt3)

diff_attempt3.to_csv('LIBOR_diff_attempt.csv')
In [1015]:
# Here we show the original graph as a reminder

plt.figure(figsize=(24,5))
plt.title("Price increments for LIBOR")
plt.xticks(np.arange(0, 2**(13)+1, 2**(13-3)))

plt.xlabel("Time\n(days)")
plt.grid(b=True)
plt.ylabel('Change\n(%)')

axes = plt.gca()
axes.set_ylim([-0.25,0.25])

plt.plot(df.LN_LIBOR, color="blue", linewidth=0.5)
plt.show()
In [1016]:
# Here we show the first twenty graphs of price changes

for i in range(0,20):
    plt.figure(figsize=(24,5))
    plt.title("Price increments for simulation " + str(i+1))
    plt.xticks(np.arange(0, 2**(13)+1, 2**(13-3)))

    plt.xlabel("Time\n(days)")
    plt.grid(b=True)
    plt.ylabel('Change\n(%)')
    
    axes = plt.gca()
    axes.set_ylim([-0.25,0.25])
    
    plt.plot(diff_attempt3[i], color="forestgreen", linewidth=0.5)
    plt.show()
In [1017]:
print("Here we can see some statistics about the price changes in the LIBOR rate in British pounds.\n")

df.LN_LIBOR.describe()
Here we can see some statistics about the price changes in the LIBOR rate in British pounds.

Out[1017]:
count    7560.000000
mean       -0.000322
std         0.008319
min        -0.211438
25%        -0.002871
50%         0.000000
75%         0.002120
max         0.097980
Name: LN_LIBOR, dtype: float64
In [1018]:
df.LN_LIBOR.std()
Out[1018]:
0.008318845781112827
In [1043]:
plt.grid(b=True)
plt.title("Mean yield change for \n" + str(attempt3_length) + " MMAR generated price paths\nfor 12-month LIBOR")
plt.hist(diff_attempt3.mean(), bins=30, color='cornflowerblue')
plt.axvline(df.LN_LIBOR.mean(), linestyle='--', color="black", label="real mean\n= "+str(round(df.LN_LIBOR.mean(), 6)))
plt.axvline(0, linestyle='--', color="forestgreen", label="mean = zero")
plt.ylabel("Frequency per " + str(attempt3_length))
plt.xlabel("Mean")
plt.legend()
plt.show()
print((diff_attempt3.mean()).describe())

plt.grid(b=True)
plt.title("Standard deviation of yield changes for \n" + str(attempt3_length) + " MMAR generated price paths\nfor 12-month LIBOR")
plt.hist(diff_attempt3.std(), bins=30, color='cornflowerblue')
plt.axvline(df.LN_LIBOR.std(), linestyle='--', color="black", label="real standard deviation\n= "+str(round(df.LN_LIBOR.std(), 5)))
plt.ylabel("Frequency per " + str(attempt3_length))
plt.xlabel("Standard Deviation")
plt.legend()
plt.show()


print("\n\nBelow is a description of the standard deviations of our " + str(attempt3_length) + " simulations.\n")
print((diff_attempt3.std()).describe())
count    10000.000000
mean        -0.000332
std          0.000150
min         -0.000593
25%         -0.000453
50%         -0.000342
75%         -0.000222
max          0.000053
dtype: float64

Below is a description of the standard deviations of our 10000 simulations.

count    10000.000000
mean         0.008401
std          0.000306
min          0.007613
25%          0.008198
50%          0.008361
75%          0.008557
max          0.011558
dtype: float64
In [1020]:
print("The mean is obviously very variable - we can simulate both downward and upward markets.\n\nWe can see that the standard deviation is also quite variable, but it is mostly near to what we expect.")
The mean is obviously very variable - we can simulate both downward and upward markets.

We can see that the standard deviation is also quite variable, but it is mostly near to what we expect.
In [1021]:
print("Here's the kurtosis for the real data for LIBOR.")

stats.kurtosis(df.LN_LIBOR[1:-1])
Here's the kurtosis for the real data for LIBOR.
Out[1021]:
86.64031207657735
In [1179]:
plt.figure(figsize=(16,4))
plt.grid(b=True)
plt.title("Kurtosis of yield changes for \n" + str(attempt3_length) + " MMAR generated price paths\nfor 12-month LIBOR")
plt.hist(stats.kurtosis(diff_attempt3), bins=100, color='cornflowerblue')
plt.axvline(stats.kurtosis(df.LN_LIBOR[1:-1]), linestyle='--', color="black", label="real kurtosis\n= "+str(round(stats.kurtosis(df.LN_LIBOR[1:-1]), 3)))
plt.axvline(0, linestyle='--', color="aqua", label="Gaussian kurtosis = 0")

kurtosis_array_diff_attempt3 = stats.kurtosis(diff_attempt3)
plt.axvline(np.quantile(kurtosis_array_diff_attempt3, 0.05), linestyle='--', color="purple", label="5th percentile")
plt.axvline(np.quantile(kurtosis_array_diff_attempt3,0.25), linestyle='--', color="blue", label="25th percentile")
plt.axvline(np.quantile(kurtosis_array_diff_attempt3,0.5), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(np.quantile(kurtosis_array_diff_attempt3,0.75), linestyle='--', color="gold", label="75th percentile")
plt.axvline(np.quantile(kurtosis_array_diff_attempt3,0.95), linestyle='--', color="red", label="95th percentile")

plt.ylabel("Frequency per " + str(attempt3_length))
plt.xlabel("Kurtosis")
plt.legend()
plt.show()
In [1023]:
print("For LIBOR, the kurtosis is kind of all over the place!")
For LIBOR, the kurtosis is kind of all over the place!
In [1024]:
print("Below is the mean kurtosis of the price changes for every simulation.\nKeep in mind that it is overweighted by the heavy right tail.\n")

stats.kurtosis(diff_attempt3).mean()
Below is the mean kurtosis of the price changes for every simulation.
Keep in mind that it is overweighted by the heavy right tail.

Out[1024]:
26.865422908528835
In [1025]:
print("The median kurtosis is quite far from the real value, which was "+str(stats.kurtosis(df.LN_LIBOR[1:-1]))+".\n")

np.median(stats.kurtosis(diff_attempt3))
The median kurtosis is quite far from the real value, which was 86.64031207657735.

Out[1025]:
20.25030858020068
In [1067]:
# Here we test how well the simulations fit the original data, using the Kolmogorov-Smirnov test to test the log price changes

ks_test_LIBOR = [0 for x in range(0, attempt3_length)]
for i in range(0,attempt3_length):
    ks_test_LIBOR[i] = stats.ks_2samp(diff_attempt3[i], df.LN_LIBOR).pvalue
    

plt.grid(b=True)
plt.hist(ks_test_LIBOR, bins=20, color='cornflowerblue')
plt.title("Histogram of Kolmogorov-Smirnov P-values\nfor " + str(attempt3_length) + " MMAR simulations for LIBOR ")
plt.axvline(0.05, linestyle='--', color="black", label="5% significance")
plt.xlabel("P-Value")
plt.ylabel("Frequency per " + str(attempt3_length))
plt.legend()
plt.show()

plt.grid(b=True)
plt.plot(sorted(ks_test_LIBOR), color='cornflowerblue')
plt.title("Sorted Kolmogorov-Smirnov P-values\nfor " + str(attempt3_length) + " MMAR simulations for LIBOR ")
plt.axhline(0.05, linestyle='--', color="black", label="5% significance")
plt.axvline(sum(x<0.05 for x in ks_test_LIBOR), linestyle='--', color="forestgreen", label="Total rejections = "+str(sum(x<0.05 for x in ks_test_LIBOR)))
plt.xlabel("Index ouf of " + str(attempt3_length))
plt.ylabel("P-Value")
plt.legend()
plt.show()

print("Count of P<0.05 = "+ str(sum(x<0.05 for x in ks_test_LIBOR)) + " failures out of " + str(attempt3_length) + ".")
Count of P<0.05 = 10000 failures out of 10000.
In [1027]:
print("We can see that most simulations fail the Kolmogorov-Smirnov test, which is a shame.\n")
print("That said, the test may be overly sensitive.\n")
print("But unfortunately, the authors don't provide any better way to test if an MMAR simulation fits the real data.")
We can see that most simulations fail the Kolmogorov-Smirnov test, which is a shame.

That said, the test may be overly sensitive.

But unfortunately, the authors don't provide any better way to test if an MMAR simulation fits the real data.
In [ ]:
 
In [ ]:
 
In [ ]:
 

In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [965]:
K=13

for i in range(0,1000):
    if i%500 == 0:
        print("currently at i=" + str(i))
    cherie = np.cumsum(lognormal_cascade(k=K, v=1, ln_lambda=lambda_NORWAY, ln_theta=sigma_NORWAY))
    cherie = 2**(K)*cherie/cherie[-1]
    plt.plot(cherie, linewidth=0.1, color="orangered")
plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
plt.yticks(np.arange(0, 2**(K)+1, 2**(K-3)))
plt.title("1,000 simulations of trading time\nfor Norway")
plt.xlabel("Conventional time\n(days)")
plt.ylabel('"Trading time"\n(\u03B8 (t), normalized)')
plt.grid(b=True)
plt.show()

for i in range(0,1000):
    if i%500 == 0:
        print("currently at i=" + str(i))
    sweetie = np.cumsum(lognormal_cascade(k=K, v=1, ln_lambda=lambda_SWEDEN, ln_theta=sigma_SWEDEN))
    sweetie = 2**(K)*sweetie/sweetie[-1]
    plt.plot(sweetie, linewidth=0.1, color="goldenrod")
plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
plt.yticks(np.arange(0, 2**(K)+1, 2**(K-3)))
plt.title("1,000 simulations of trading time\nfor Sweden")
plt.xlabel("Conventional time\n(days)")
plt.ylabel('"Trading time"\n(\u03B8 (t), normalized)')
plt.grid(b=True)
plt.show()

for i in range(0,1000):
    if i%500 == 0:
        print("currently at i=" + str(i))
    libbie = np.cumsum(lognormal_cascade(k=K, v=1, ln_lambda=lambda_LIBOR, ln_theta=sigma_LIBOR))
    libbie = 2**(K)*libbie/libbie[-1]
    plt.plot(libbie, linewidth=0.1, color="cornflowerblue")
plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
plt.yticks(np.arange(0, 2**(K)+1, 2**(K-3)))
plt.title("1,000 simulations of trading time\nfor LIBOR")
plt.xlabel("Conventional time\n(days)")
plt.ylabel('"Trading time"\n(\u03B8 (t), normalized)')
plt.grid(b=True)
plt.show()
currently at i=0
currently at i=500
currently at i=0
currently at i=500
currently at i=0
currently at i=500
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [1161]:
Gaussian_length = 10000

Gaussian_Norway = pd.DataFrame()

start_time = datetime.now().strftime("%I:%M%p")

for i in range(0,Gaussian_length):
    print("Having started at "+str(start_time))
    print("i="+str(i))
    Gaussian_Norway[i] = [0.0 for x in range(0,2**13)]
    Gaussian_Norway[i][0] = df.Price_NORWAY[0]
    for j in range(1,len(Gaussian_Norway)):
        Gaussian_Norway[i][j] = Gaussian_Norway[i][j-1]*np.exp(np.random.normal(0,df.LN_NORWAY.std()))
    clear_output()
    
print("Having started at "+str(start_time))

Gaussian_Norway.to_csv('Gaussian_Norway.csv')
Having started at 12:50AM
In [1162]:
plt.figure(figsize=(24,10))

for i in range(0,Gaussian_length):
    K=13
    plt.title(str(Gaussian_length) + " Gaussian generated price paths")
    plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
    plt.xlabel("Time\n(days)")
    plt.ylabel('Price level')
    plt.grid(b=True)
    plt.plot(Gaussian_Norway[i], color="orangered", linewidth=0.1, alpha=0.2)
    

plt.show()
In [1163]:
# Here we calculate the LN differences

Gaussian_diff_attempt = [[0 for x in range(0,Gaussian_length)] for y in range (0,8192)]
for i in range(0,Gaussian_length):
    if i%(Gaussian_length/Gaussian_length) == 0:
        print("i="+str(i))
    
    Gaussian_diff_attempt[i]=np.log(Gaussian_Norway[i]).diff()
    
    clear_output()
    
    
Gaussian_diff_attempt= pd.DataFrame(Gaussian_diff_attempt)

Gaussian_diff_attempt.to_csv('Gaussian_diff_Norway.csv')
In [76]:
for i in range(0,5):
    plt.figure(figsize=(24,5))
    plt.title("Price increments for simulation " + str(i+1))
    plt.xticks(np.arange(0, 2**(13)+1, 2**(13-3)))

    plt.xlabel("Time\n(days)")
    plt.grid(b=True)
    plt.ylabel('Change\n(%)')
    
    axes = plt.gca()
    axes.set_ylim([-0.10,0.10])
    
    plt.plot(Gaussian_diff_attempt[i], color="darkviolet", linewidth=0.5)
    plt.show()
In [1174]:
plt.grid(b=True)
plt.title("Histogram of Gaussian simulated final prices\nafter 8192 days for Norway")
plt.hist(Gaussian_Norway.iloc[-1], bins=40, color="orangered")
plt.xlabel("Final price (USD/NOK)")
plt.ylabel("Frequency")
plt.axvline(Gaussian_Norway.iloc[-1].quantile(0.05), linestyle='--', color="purple", label="5th percentile")
plt.axvline(Gaussian_Norway.iloc[-1].quantile(0.25), linestyle='--', color="blue", label="25th percentile")
plt.axvline(Gaussian_Norway.iloc[-1].quantile(0.5), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(Gaussian_Norway.iloc[-1].quantile(0.75), linestyle='--', color="gold", label="75th percentile")
plt.axvline(Gaussian_Norway.iloc[-1].quantile(0.95), linestyle='--', color="red", label="95th percentile")
plt.axvline(df.Price_NORWAY.iloc[-1], linestyle='--', color="black", label="real final price", linewidth=0.7)
plt.legend()
plt.show()
In [179]:
print("Here we can see some statistics about the price changes in the Norwegian exchange rate.\n")

df.LN_NORWAY.describe()
Here we can see some statistics about the price changes in the Norwegian exchange rate.

Out[179]:
count    7560.000000
mean        0.000033
std         0.007228
min        -0.064440
25%        -0.004051
50%         0.000000
75%         0.003942
max         0.059688
Name: LN_NORWAY, dtype: float64
In [196]:
df.LN_NORWAY.std()
Out[196]:
0.007228071748173812
In [67]:
attempt = pd.read_csv("~sib-ros/Desktop/Simulation collection/Norway_attempt.csv", index_col=0)
attempt.columns = attempt.columns.astype(int)

diff_attempt = pd.read_csv("~sib-ros/Desktop/Simulation collection/Norway_diff_attempt.csv", index_col=0)
diff_attempt.columns = diff_attempt.columns.astype(int)

Gaussian_Norway = pd.read_csv("~sib-ros/Desktop/Simulation collection/Gaussian_Norway.csv", index_col=0)
Gaussian_Norway.columns = attempt.columns.astype(int)

Gaussian_diff_attempt = pd.read_csv("~sib-ros/Desktop/Simulation collection/Gaussian_diff_Norway.csv", index_col=0)
Gaussian_diff_attempt.columns = diff_attempt.columns.astype(int)
In [185]:
plt.grid(b=True)
plt.title("Mean price change for \n" + str(Gaussian_length) + " Gaussian generated price paths\nfor NORWAY")
plt.hist(Gaussian_diff_attempt.mean(), bins=30, color='orangered')
plt.axvline(df.LN_NORWAY.mean(), linestyle='--', color="black", label="real mean\n= "+str(round(df.LN_NORWAY.mean(), 6)))
plt.axvline(0, linestyle='--', color="forestgreen", label="mean = zero")
plt.ylabel("Frequency per " + str(Gaussian_length))
plt.xlabel("Mean")
plt.legend()
plt.show()
print((Gaussian_diff_attempt.mean()).describe())

plt.grid(b=True)
plt.title("Standard deviation of price changes for \n" + str(Gaussian_length) + " Gaussian generated price paths\nfor NORWAY")
plt.hist(Gaussian_diff_attempt.std(), bins=30, color='orangered')
plt.axvline(df.LN_NORWAY.std(), linestyle='--', color="black", label="real standard deviation\n= "+str(round(df.LN_NORWAY.std(), 5)))
plt.ylabel("Frequency per " + str(Gaussian_length))
plt.xlabel("Standard Deviation")
plt.legend()
plt.show()


print("\n\nBelow is a description of the standard deviations of our 10,000 simulations.\n")
(Gaussian_diff_attempt.std()).describe()
count    1.000000e+04
mean    -1.877157e-07
std      8.041581e-05
min     -3.541401e-04
25%     -5.406017e-05
50%     -6.722251e-07
75%      5.395653e-05
max      3.784583e-04
dtype: float64

Below is a description of the standard deviations of our 10,000 simulations.

Out[185]:
count    10000.000000
mean         0.007228
std          0.000057
min          0.007033
25%          0.007189
50%          0.007228
75%          0.007266
max          0.007463
dtype: float64
In [181]:
print("The mean is obviously very variable - we can simulate both downward and upward markets.\n\nWe can see that the standard deviation is also quite variable, but it is mostly near to what we expect.")
The mean is obviously very variable - we can simulate both downward and upward markets.

We can see that the standard deviation is also quite variable, but it is mostly near to what we expect.
In [182]:
print("Here's the kurtosis for the real data for Norway.")

stats.kurtosis(df.LN_NORWAY[1:-1])
Here's the kurtosis for the real data for Norway.
Out[182]:
4.279929706945225
In [88]:
plt.figure(figsize=(16,4))
plt.grid(b=True)
plt.title("Kurtosis of price changes for \n" + str(Gaussian_length) + " Gaussian generated price paths\nfor Norway")
plt.hist(stats.kurtosis(Gaussian_diff_attempt), bins=100, color='orangered')
plt.axvline(stats.kurtosis(df.LN_NORWAY[1:-1]), linestyle='--', color="black", label="real kurtosis\n= "+str(round(stats.kurtosis(df.LN_NORWAY[1:-1]), 3)))
plt.axvline(0, linestyle='--', color="aqua", label="Gaussian kurtosis = 0")

kurtosis_array_Gaussian_diff_attempt = stats.kurtosis(Gaussian_diff_attempt)
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt, 0.05), linestyle='--', color="purple", label="5th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt,0.25), linestyle='--', color="blue", label="25th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt,0.5), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt,0.75), linestyle='--', color="gold", label="75th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt,0.95), linestyle='--', color="red", label="95th percentile")

plt.ylabel("Frequency per " + str(Gaussian_length))
plt.xlabel("Kurtosis")
plt.legend()
plt.show()
In [87]:
plt.figure(figsize=(16,4))
plt.grid(b=True)
plt.title("Kurtosis of price changes for \n" + str(Gaussian_length) + " Gaussian generated price paths\nfor Norway")
plt.hist(stats.kurtosis(Gaussian_diff_attempt), bins=100, color='orangered')
# plt.axvline(stats.kurtosis(df.LN_NORWAY[1:-1]), linestyle='--', color="black", label="real kurtosis\n= "+str(round(stats.kurtosis(df.LN_NORWAY[1:-1]), 3)))
plt.axvline(0, linestyle='--', color="aqua", label="Gaussian kurtosis = 0")

kurtosis_array_Gaussian_diff_attempt = stats.kurtosis(Gaussian_diff_attempt)
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt, 0.05), linestyle='--', color="purple", label="5th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt,0.25), linestyle='--', color="blue", label="25th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt,0.5), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt,0.75), linestyle='--', color="gold", label="75th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt,0.95), linestyle='--', color="red", label="95th percentile")

plt.ylabel("Frequency per " + str(Gaussian_length))
plt.xlabel("Kurtosis")
plt.legend()
plt.show()
In [100]:
plt.grid(b=True)
plt.title("Kurtosis of price changes for \n" + str(Gaussian_length) + " Gaussian generated price paths\nfor Norway")
plt.hist(stats.kurtosis(Gaussian_diff_attempt), bins=100, color='orangered')
# plt.axvline(stats.kurtosis(df.LN_NORWAY[1:-1]), linestyle='--', color="black", label="real kurtosis\n= "+str(round(stats.kurtosis(df.LN_NORWAY[1:-1]), 3)))
plt.axvline(0, linestyle='--', color="aqua", label="Gaussian kurtosis = 0")

kurtosis_array_Gaussian_diff_attempt = stats.kurtosis(Gaussian_diff_attempt)
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt, 0.05), linestyle='--', color="purple", label="5th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt,0.25), linestyle='--', color="blue", label="25th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt,0.5), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt,0.75), linestyle='--', color="gold", label="75th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt,0.95), linestyle='--', color="red", label="95th percentile")

plt.ylabel("Frequency per " + str(Gaussian_length))
plt.xlabel("Kurtosis")
plt.legend()
plt.show()
In [184]:
print("But at least the kurtosis is about right!")
But at least the kurtosis is about right!
In [208]:
print("Below is the mean kurtosis of the price changes for every simulation.\nKeep in mind that it is overweighted by the heavy right tail.\n")

stats.kurtosis(diff_attempt).mean()
Below is the mean kurtosis of the price changes for every simulation.
Keep in mind that it is overweighted by the heavy right tail.

Out[208]:
5.15394832632845
In [701]:
print("The median kurtosis is about right. The real kurtosis was "+str(stats.kurtosis(df.LN_NORWAY[1:-1]))+".\n")

np.median(stats.kurtosis(diff_attempt))
The median kurtosis is about right. The real kurtosis was 4.279929706945225.

Out[701]:
4.616610555074015
In [1182]:
# Here we test how well the simulations fit the original data, using the Kolmogorov-Smirnov test to test the log price changes

ks_test_Gaussian_NORWAY = [0 for x in range(0, Gaussian_length)]
for i in range(0,Gaussian_length):
    ks_test_Gaussian_NORWAY[i] = stats.ks_2samp(Gaussian_diff_attempt[i], df.LN_NORWAY).pvalue
    

plt.grid(b=True)
plt.hist(ks_test_Gaussian_NORWAY, bins=20, color='orangered')
plt.title("Histogram of Kolmogorov-Smirnov P-values\nfor " + str(Gaussian_length) + " Gaussian simulations for Norway ")
plt.axvline(0.05, linestyle='--', color="black", label="5% significance")
plt.xlabel("P-Value")
plt.ylabel("Frequency per " + str(Gaussian_length))
plt.legend()
plt.show()

plt.grid(b=True)
plt.plot(sorted(ks_test_Gaussian_NORWAY), color='orangered')
plt.title("Sorted Kolmogorov-Smirnov P-values\nfor " + str(Gaussian_length) + " Gaussian simulations for Norway ")
plt.axhline(0.05, linestyle='--', color="black", label="5% significance")
plt.axvline(sum(x<0.05 for x in ks_test_Gaussian_NORWAY), linestyle='--', color="purple", label="Total rejections = "+str(sum(x<0.05 for x in ks_test_Gaussian_NORWAY)))
plt.xlabel("Index ouf of " + str(Gaussian_length))
plt.ylabel("P-Value")
plt.legend()
plt.show()

print("Count of P<0.05 = "+ str(sum(x<0.05 for x in ks_test_Gaussian_NORWAY)) + " failures out of " + str(Gaussian_length) + ".")
Count of P<0.05 = 10000 failures out of 10000.
In [1183]:
print("We can see that all Gaussian simulations fail the Kolmogorov-Smirnov test.\n")
We can see that all Gaussian simulations fail the Kolmogorov-Smirnov test.

In [1216]:
print("Like last time, we want to add some restrictions to our simulations, to avoid 'absurd' results.")
print("\nWe use the same restrictions as last time, namely:\n")
print("For Sweden: 100 < P(t) < 5000 for all t.")
print("For LIBOR: 0.1 < P(t) < 20 for all t.")
Like last time, we want to add some restrictions to our simulations, to avoid 'absurd' results.

We use the same restrictions as last time, namely:

For Sweden: 100 < P(t) < 5000 for all t.
For LIBOR: 0.1 < P(t) < 20 for all t.
In [1197]:
def gaussian_attempt(top_limit, bottom_limit, mean_return, std_return, original_price, number_of_simulated_days):
        
    TEMPORARY_array = [0.0 for x in range(number_of_simulated_days)]
    
    for j in range(0,number_of_simulated_days):
        if j == 0:
            TEMPORARY_array[j] = original_price
        else:
            Z = np.random.normal(mean_return, std_return)
            TEMPORARY_array[j] = TEMPORARY_array[j-1]*np.exp(Z)
    
    if (all(Pt < top_limit for Pt in TEMPORARY_array) and all(Pt > bottom_limit for Pt in TEMPORARY_array))==True:
        return TEMPORARY_array
        
    else:
        return gaussian_attempt(top_limit, bottom_limit, mean_return, std_return, original_price, number_of_simulated_days)
In [1241]:
print("We can now beging generating simple, 'random walk'-style market simulations.")
We can now beging generating simple, 'random walk'-style market simulations.
In [1242]:
Gaussian_length2 = 10000

Gaussian_Sweden = pd.DataFrame()

Sweden_top_limit = 5000
Sweden_bottom_limit = 100

number_of_simulated_days = 2**13+1

start_time = datetime.now().strftime("%I:%M%p")


for i in range(0,Gaussian_length2):
    print("Having started at "+str(start_time))
    print("i="+str(i))
        
    Gaussian_Sweden[i] = gaussian_attempt(Sweden_top_limit, Sweden_bottom_limit, df.LN_SWEDEN.mean(), df.LN_SWEDEN.std(), df.Price_SWEDEN[0], number_of_simulated_days)
    clear_output()
    
print("Having started at "+str(start_time))


Gaussian_Sweden.to_csv('Gaussian_Sweden.csv')
Having started at 11:05AM
In [1243]:
plt.figure(figsize=(24,10))

for i in range(0,Gaussian_length2):
    K=13
    plt.title(str(Gaussian_length2) + " Gaussian generated price paths")
    plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
    plt.xlabel("Time\n(days)")
    plt.ylabel('Price level')
    plt.grid(b=True)
    plt.plot(Gaussian_Sweden[i], color="goldenrod", linewidth=0.1, alpha=0.2)
    

plt.show()
In [1244]:
# Here we calculate the LN differences

Gaussian_diff_attempt2 = [np.log(Gaussian_Sweden[x]).diff() for x in range(0,Gaussian_length2)]
Gaussian_diff_attempt2 = pd.DataFrame(Gaussian_diff_attempt2).transpose()
Gaussian_diff_attempt2 = Gaussian_diff_attempt2[:-1]


Gaussian_diff_attempt2

Gaussian_diff_attempt2.to_csv('Gaussian_diff_Sweden.csv')
In [70]:
for i in range(0,5):
    plt.figure(figsize=(24,5))
    plt.title("Price increments for simulation " + str(i+1))
    plt.xticks(np.arange(0, 2**(13)+1, 2**(13-3)))

    plt.xlabel("Time\n(days)")
    plt.grid(b=True)
    plt.ylabel('Change\n(%)')
    
    axes = plt.gca()
    axes.set_ylim([-0.175,0.175])
    
    plt.plot(Gaussian_diff_attempt2[i], color="orange", linewidth=0.5)
    plt.show()
In [93]:
plt.grid(b=True)
plt.title("Histogram of Gaussian simulated final prices\nafter 8192 days for Sweden")
plt.hist(Gaussian_Sweden.iloc[-1], bins=40, color="goldenrod")
plt.xlabel("Final price (SEK)")
plt.ylabel("Frequency")
plt.axvline(Gaussian_Sweden.iloc[-1].quantile(0.05), linestyle='--', color="purple", label="5th percentile")
plt.axvline(Gaussian_Sweden.iloc[-1].quantile(0.25), linestyle='--', color="blue", label="25th percentile")
plt.axvline(Gaussian_Sweden.iloc[-1].quantile(0.5), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(Gaussian_Sweden.iloc[-1].quantile(0.75), linestyle='--', color="gold", label="75th percentile")
plt.axvline(Gaussian_Sweden.iloc[-1].quantile(0.95), linestyle='--', color="red", label="95th percentile")
plt.axvline(df.Price_SWEDEN.iloc[-1], linestyle='--', color="black", label="real final price", linewidth=0.7)
plt.legend()
plt.show()
In [77]:
plt.grid(b=True)
plt.title("Mean price change for \n" + str(Gaussian_length2) + " Gaussian generated price paths\nfor SWEDEN")
plt.hist(Gaussian_diff_attempt2.mean(), bins=30, color='goldenrod')
plt.axvline(df.LN_SWEDEN.mean(), linestyle='--', color="black", label="real mean\n= "+str(round(df.LN_SWEDEN.mean(), 6)))
plt.axvline(0, linestyle='--', color="forestgreen", label="mean = zero")
plt.ylabel("Frequency per " + str(Gaussian_length2))
plt.xlabel("Mean")
plt.legend()
plt.show()
print((Gaussian_diff_attempt2.mean()).describe())

plt.grid(b=True)
plt.title("Standard deviation of price changes for \n" + str(Gaussian_length2) + " Gaussian generated price paths\nfor SWEDEN")
plt.hist(Gaussian_diff_attempt2.std(), bins=30, color='goldenrod')
plt.axvline(df.LN_SWEDEN.std(), linestyle='--', color="black", label="real standard deviation\n= "+str(round(df.LN_SWEDEN.std(), 5)))
plt.ylabel("Frequency per " + str(Gaussian_length2))
plt.xlabel("Standard Deviation")
plt.legend()
plt.show()


print("\n\nBelow is a description of the standard deviations of our 10,000 simulations.\n")
(Gaussian_diff_attempt2.std()).describe()
count    10000.000000
mean         0.000283
std          0.000092
min         -0.000015
25%          0.000220
50%          0.000295
75%          0.000356
max          0.000443
dtype: float64

Below is a description of the standard deviations of our 10,000 simulations.

Out[77]:
count    10000.000000
mean         0.014582
std          0.000113
min          0.014124
25%          0.014504
50%          0.014583
75%          0.014658
max          0.015016
dtype: float64
In [181]:
print("The mean is obviously very variable - we can simulate both downward and upward markets.\n\nWe can see that the standard deviation is also quite variable, but it is mostly near to what we expect.")
The mean is obviously very variable - we can simulate both downward and upward markets.

We can see that the standard deviation is also quite variable, but it is mostly near to what we expect.
In [182]:
print("Here's the kurtosis for the real data for Norway.")

stats.kurtosis(df.LN_NORWAY[1:-1])
Here's the kurtosis for the real data for Norway.
Out[182]:
4.279929706945225
In [79]:
plt.figure(figsize=(16,4))
plt.grid(b=True)
plt.title("Kurtosis of price changes for \n" + str(Gaussian_length2) + " Gaussian generated price paths\nfor Sweden")
plt.hist(stats.kurtosis(Gaussian_diff_attempt2), bins=100, color='goldenrod')
plt.axvline(stats.kurtosis(df.LN_SWEDEN[1:-1]), linestyle='--', color="black", label="real kurtosis\n= "+str(round(stats.kurtosis(df.LN_SWEDEN[1:-1]), 3)))
plt.axvline(0, linestyle='--', color="aqua", label="Gaussian kurtosis = 0")

kurtosis_array_Gaussian_diff_attempt2 = stats.kurtosis(Gaussian_diff_attempt2)
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2, 0.05), linestyle='--', color="purple", label="5th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2,0.25), linestyle='--', color="blue", label="25th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2,0.5), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2,0.75), linestyle='--', color="gold", label="75th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2,0.95), linestyle='--', color="red", label="95th percentile")

plt.ylabel("Frequency per " + str(Gaussian_length2))
plt.xlabel("Kurtosis")
plt.legend()
plt.show()
In [91]:
plt.figure(figsize=(16,4))
plt.grid(b=True)
plt.title("Kurtosis of price changes for \n" + str(Gaussian_length2) + " Gaussian generated price paths\nfor Sweden")
plt.hist(stats.kurtosis(Gaussian_diff_attempt2), bins=100, color='goldenrod')
# plt.axvline(stats.kurtosis(df.LN_SWEDEN[1:-1]), linestyle='--', color="black", label="real kurtosis\n= "+str(round(stats.kurtosis(df.LN_SWEDEN[1:-1]), 3)))
plt.axvline(0, linestyle='--', color="aqua", label="Gaussian kurtosis = 0")

kurtosis_array_Gaussian_diff_attempt2 = stats.kurtosis(Gaussian_diff_attempt2)
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2, 0.05), linestyle='--', color="purple", label="5th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2,0.25), linestyle='--', color="blue", label="25th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2,0.5), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2,0.75), linestyle='--', color="gold", label="75th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2,0.95), linestyle='--', color="red", label="95th percentile")

plt.ylabel("Frequency per " + str(Gaussian_length2))
plt.xlabel("Kurtosis")
plt.legend()
plt.show()
In [101]:
plt.grid(b=True)
plt.title("Kurtosis of price changes for \n" + str(Gaussian_length2) + " Gaussian generated price paths\nfor Sweden")
plt.hist(stats.kurtosis(Gaussian_diff_attempt2), bins=100, color='goldenrod')
# plt.axvline(stats.kurtosis(df.LN_SWEDEN[1:-1]), linestyle='--', color="black", label="real kurtosis\n= "+str(round(stats.kurtosis(df.LN_SWEDEN[1:-1]), 3)))
plt.axvline(0, linestyle='--', color="aqua", label="Gaussian kurtosis = 0")

kurtosis_array_Gaussian_diff_attempt2 = stats.kurtosis(Gaussian_diff_attempt2)
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2, 0.05), linestyle='--', color="purple", label="5th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2,0.25), linestyle='--', color="blue", label="25th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2,0.5), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2,0.75), linestyle='--', color="gold", label="75th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2,0.95), linestyle='--', color="red", label="95th percentile")

plt.ylabel("Frequency per " + str(Gaussian_length2))
plt.xlabel("Kurtosis")
plt.legend()
plt.show()
In [80]:
# Here we test how well the simulations fit the original data, using the Kolmogorov-Smirnov test to test the log price changes

ks_test_Gaussian_SWEDEN = [0 for x in range(0, Gaussian_length2)]
for i in range(0,Gaussian_length2):
    ks_test_Gaussian_SWEDEN[i] = stats.ks_2samp(Gaussian_diff_attempt2[i], df.LN_SWEDEN).pvalue
    

plt.grid(b=True)
plt.hist(ks_test_Gaussian_SWEDEN, bins=20, color='goldenrod')
plt.title("Histogram of Kolmogorov-Smirnov P-values\nfor " + str(Gaussian_length2) + " Gaussian simulations for Sweden")
plt.axvline(0.05, linestyle='--', color="black", label="5% significance")
plt.xlabel("P-Value")
plt.ylabel("Frequency per " + str(Gaussian_length2))
plt.legend()
plt.show()

plt.grid(b=True)
plt.plot(sorted(ks_test_Gaussian_SWEDEN), color='goldenrod')
plt.title("Sorted Kolmogorov-Smirnov P-values\nfor " + str(Gaussian_length2) + " Gaussian simulations for Sweden")
plt.axhline(0.05, linestyle='--', color="black", label="5% significance")
plt.axvline(sum(x<0.05 for x in ks_test_Gaussian_SWEDEN), linestyle='--', color="orange", label="Total rejections = "+str(sum(x<0.05 for x in ks_test_Gaussian_SWEDEN)))
plt.xlabel("Index ouf of " + str(Gaussian_length2))
plt.ylabel("P-Value")
plt.legend()
plt.show()

print("Count of P<0.05 = "+ str(sum(x<0.05 for x in ks_test_Gaussian_SWEDEN)) + " failures out of " + str(Gaussian_length2) + ".")
Count of P<0.05 = 10000 failures out of 10000.
In [ ]:
 
In [ ]:
 
In [1246]:
Gaussian_length3 = 10000

Gaussian_LIBOR = pd.DataFrame()

LIBOR_top_limit = 20
LIBOR_bottom_limit = 0.1

number_of_simulated_days = 2**13+1

start_time = datetime.now().strftime("%I:%M%p")


for i in range(0,Gaussian_length3):
    print("Having started at "+str(start_time))
    print("i="+str(i))
        
    Gaussian_LIBOR[i] = gaussian_attempt(LIBOR_top_limit, LIBOR_bottom_limit, df.LN_LIBOR.mean(), df.LN_LIBOR.std(), df.Price_LIBOR[0], number_of_simulated_days)
    clear_output()
    
print("Having started at "+str(start_time))


Gaussian_LIBOR.to_csv('Gaussian_LIBOR.csv')
Having started at 11:27AM
In [1247]:
plt.figure(figsize=(24,10))

for i in range(0,Gaussian_length3):
    K=13
    plt.title(str(Gaussian_length3) + " Gaussian generated price paths")
    plt.xticks(np.arange(0, 2**(K)+1, 2**(K-3)))
    plt.xlabel("Time\n(days)")
    plt.ylabel('Price level')
    plt.grid(b=True)
    plt.plot(Gaussian_LIBOR[i], color="cornflowerblue", linewidth=0.1, alpha=0.2)
    

plt.show()
In [1248]:
# Here we calculate the LN differences

Gaussian_diff_attempt3 = [np.log(Gaussian_LIBOR[x]).diff() for x in range(0,Gaussian_length3)]
Gaussian_diff_attempt3 = pd.DataFrame(Gaussian_diff_attempt3).transpose()
Gaussian_diff_attempt3 = Gaussian_diff_attempt3[:-1]

Gaussian_diff_attempt3

Gaussian_diff_attempt3.to_csv('Gaussian_diff_LIBOR.csv')
In [71]:
for i in range(0,5):
    plt.figure(figsize=(24,5))
    plt.title("Price increments for simulation " + str(i+1))
    plt.xticks(np.arange(0, 2**(13)+1, 2**(13-3)))

    plt.xlabel("Time\n(days)")
    plt.grid(b=True)
    plt.ylabel('Change\n(%)')
    
    axes = plt.gca()
    axes.set_ylim([-0.25,0.25])
    
    plt.plot(Gaussian_diff_attempt3[i], color="forestgreen", linewidth=0.5)
    plt.show()
In [95]:
plt.grid(b=True)
plt.title("Histogram of Gaussian simulated final prices\nafter 8192 days for LIBOR")
plt.hist(Gaussian_LIBOR.iloc[-1], bins=40, color="cornflowerblue")
plt.xlabel("Final yield (%)")
plt.ylabel("Frequency")
plt.axvline(Gaussian_LIBOR.iloc[-1].quantile(0.05), linestyle='--', color="purple", label="5th percentile")
plt.axvline(Gaussian_LIBOR.iloc[-1].quantile(0.25), linestyle='--', color="blue", label="25th percentile")
plt.axvline(Gaussian_LIBOR.iloc[-1].quantile(0.5), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(Gaussian_LIBOR.iloc[-1].quantile(0.75), linestyle='--', color="gold", label="75th percentile")
plt.axvline(Gaussian_LIBOR.iloc[-1].quantile(0.95), linestyle='--', color="red", label="95th percentile")
plt.axvline(df.Price_LIBOR.iloc[-1], linestyle='--', color="black", label="real final price", linewidth=0.7)
plt.legend()
plt.show()
In [97]:
plt.grid(b=True)
plt.title("Mean yield change for \n" + str(Gaussian_length3) + " Gaussian generated price paths\nfor LIBOR")
plt.hist(Gaussian_diff_attempt3.mean(), bins=30, color='cornflowerblue')
plt.axvline(df.LN_LIBOR.mean(), linestyle='--', color="black", label="real mean\n= "+str(round(df.LN_LIBOR.mean(), 6)))
plt.axvline(0, linestyle='--', color="forestgreen", label="mean = zero")
plt.ylabel("Frequency per " + str(Gaussian_length3))
plt.xlabel("Mean")
plt.legend()
plt.show()
print((Gaussian_diff_attempt3.mean()).describe())

plt.grid(b=True)
plt.title("Standard deviation of yield changes for \n" + str(Gaussian_length3) + " Gaussian generated price paths\nfor LIBOR")
plt.hist(Gaussian_diff_attempt3.std(), bins=30, color='cornflowerblue')
plt.axvline(df.LN_LIBOR.std(), linestyle='--', color="black", label="real standard deviation\n= "+str(round(df.LN_LIBOR.std(), 5)))
plt.ylabel("Frequency per " + str(Gaussian_length3))
plt.xlabel("Standard Deviation")
plt.legend()
plt.show()


print("\n\nBelow is a description of the standard deviations of our 10,000 simulations.\n")
(Gaussian_diff_attempt3.std()).describe()
count    10000.000000
mean        -0.000326
std          0.000090
min         -0.000585
25%         -0.000387
50%         -0.000326
75%         -0.000265
max          0.000024
dtype: float64

Below is a description of the standard deviations of our 10,000 simulations.

Out[97]:
count    10000.000000
mean         0.008318
std          0.000065
min          0.008074
25%          0.008274
50%          0.008318
75%          0.008362
max          0.008557
dtype: float64
In [181]:
print("The mean is obviously very variable - we can simulate both downward and upward markets.\n\nWe can see that the standard deviation is also quite variable, but it is mostly near to what we expect.")
The mean is obviously very variable - we can simulate both downward and upward markets.

We can see that the standard deviation is also quite variable, but it is mostly near to what we expect.
In [182]:
print("Here's the kurtosis for the real data for Norway.")

stats.kurtosis(df.LN_NORWAY[1:-1])
Here's the kurtosis for the real data for Norway.
Out[182]:
4.279929706945225
In [82]:
plt.figure(figsize=(16,4))
plt.grid(b=True)
plt.title("Kurtosis of price changes for \n" + str(Gaussian_length3) + " Gaussian generated price paths\nfor Norway")
plt.hist(stats.kurtosis(Gaussian_diff_attempt3), bins=100, color='cornflowerblue')
plt.axvline(stats.kurtosis(df.LN_LIBOR[1:-1]), linestyle='--', color="black", label="real kurtosis\n= "+str(round(stats.kurtosis(df.LN_LIBOR[1:-1]), 3)))
plt.axvline(0, linestyle='--', color="aqua", label="Gaussian kurtosis = 0")

kurtosis_array_Gaussian_diff_attempt3 = stats.kurtosis(Gaussian_diff_attempt3)
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3, 0.05), linestyle='--', color="purple", label="5th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3,0.25), linestyle='--', color="blue", label="25th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3,0.5), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3,0.75), linestyle='--', color="gold", label="75th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3,0.95), linestyle='--', color="red", label="95th percentile")

plt.ylabel("Frequency per " + str(Gaussian_length3))
plt.xlabel("Kurtosis")
plt.legend()
plt.show()
In [92]:
plt.figure(figsize=(16,4))
plt.grid(b=True)
plt.title("Kurtosis of price changes for \n" + str(Gaussian_length3) + " Gaussian generated price paths\nfor Norway")
plt.hist(stats.kurtosis(Gaussian_diff_attempt3), bins=100, color='cornflowerblue')
# plt.axvline(stats.kurtosis(df.LN_LIBOR[1:-1]), linestyle='--', color="black", label="real kurtosis\n= "+str(round(stats.kurtosis(df.LN_LIBOR[1:-1]), 3)))
plt.axvline(0, linestyle='--', color="aqua", label="Gaussian kurtosis = 0")

kurtosis_array_Gaussian_diff_attempt3 = stats.kurtosis(Gaussian_diff_attempt3)
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3, 0.05), linestyle='--', color="purple", label="5th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3,0.25), linestyle='--', color="blue", label="25th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3,0.5), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3,0.75), linestyle='--', color="gold", label="75th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3,0.95), linestyle='--', color="red", label="95th percentile")

plt.ylabel("Frequency per " + str(Gaussian_length3))
plt.xlabel("Kurtosis")
plt.legend()
plt.show()
In [102]:
plt.grid(b=True)
plt.title("Kurtosis of price changes for \n" + str(Gaussian_length3) + " Gaussian generated price paths\nfor Norway")
plt.hist(stats.kurtosis(Gaussian_diff_attempt3), bins=100, color='cornflowerblue')
# plt.axvline(stats.kurtosis(df.LN_LIBOR[1:-1]), linestyle='--', color="black", label="real kurtosis\n= "+str(round(stats.kurtosis(df.LN_LIBOR[1:-1]), 3)))
plt.axvline(0, linestyle='--', color="aqua", label="Gaussian kurtosis = 0")

kurtosis_array_Gaussian_diff_attempt3 = stats.kurtosis(Gaussian_diff_attempt3)
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3, 0.05), linestyle='--', color="purple", label="5th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3,0.25), linestyle='--', color="blue", label="25th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3,0.5), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3,0.75), linestyle='--', color="gold", label="75th percentile")
plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3,0.95), linestyle='--', color="red", label="95th percentile")

plt.ylabel("Frequency per " + str(Gaussian_length3))
plt.xlabel("Kurtosis")
plt.legend()
plt.show()
In [83]:
# Here we test how well the simulations fit the original data, using the Kolmogorov-Smirnov test to test the log price changes

ks_test_Gaussian_LIBOR = [0 for x in range(0, Gaussian_length3)]
for i in range(0,Gaussian_length3):
    ks_test_Gaussian_LIBOR[i] = stats.ks_2samp(Gaussian_diff_attempt3[i], df.LN_LIBOR).pvalue
    

plt.grid(b=True)
plt.hist(ks_test_Gaussian_LIBOR, bins=20, color='cornflowerblue')
plt.title("Histogram of Kolmogorov-Smirnov P-values\nfor " + str(Gaussian_length3) + " Gaussian simulations for Norway ")
plt.axvline(0.05, linestyle='--', color="black", label="5% significance")
plt.xlabel("P-Value")
plt.ylabel("Frequency per " + str(Gaussian_length3))
plt.legend()
plt.show()

plt.grid(b=True)
plt.plot(sorted(ks_test_Gaussian_LIBOR), color='cornflowerblue')
plt.title("Sorted Kolmogorov-Smirnov P-values\nfor " + str(Gaussian_length3) + " Gaussian simulations for Norway ")
plt.axhline(0.05, linestyle='--', color="black", label="5% significance")
plt.axvline(sum(x<0.05 for x in ks_test_Gaussian_LIBOR), linestyle='--', color="forestgreen", label="Total rejections = "+str(sum(x<0.05 for x in ks_test_Gaussian_LIBOR)))
plt.xlabel("Index ouf of " + str(Gaussian_length3))
plt.ylabel("P-Value")
plt.legend()
plt.show()

print("Count of P<0.05 = "+ str(sum(x<0.05 for x in ks_test_Gaussian_LIBOR)) + " failures out of " + str(Gaussian_length3) + ".")
Count of P<0.05 = 10000 failures out of 10000.
In [ ]:
 
In [103]:
print("Let's get all the kurtosis figures onto one graph")
Let's get all the kurtosis figures onto one graph
In [107]:
# NORWAY GRAPHS
plt.grid(b=True)
plt.title("Kurtosis of price changes for \n" + str(Gaussian_length) + " Gaussian generated price paths\nfor Norway")
plt.hist(stats.kurtosis(Gaussian_diff_attempt), bins=100, color='orangered', alpha = 0.5, label = "Norway")
# plt.axvline(stats.kurtosis(df.LN_NORWAY[1:-1]), linestyle='--', color="black", label="real kurtosis\n= "+str(round(stats.kurtosis(df.LN_NORWAY[1:-1]), 3)))
plt.axvline(0, linestyle='--', color="aqua", label="Gaussian kurtosis = 0")

# kurtosis_array_Gaussian_diff_attempt = stats.kurtosis(Gaussian_diff_attempt)
# plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt, 0.05), linestyle='--', color="purple", label="5th percentile")
# plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt,0.25), linestyle='--', color="blue", label="25th percentile")
# plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt,0.5), linestyle='--', color="forestgreen", label="50th percentile")
# plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt,0.75), linestyle='--', color="gold", label="75th percentile")
# plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt,0.95), linestyle='--', color="red", label="95th percentile")


# SWEDEN GRAPHS
plt.grid(b=True)
plt.title("Kurtosis of price changes for \n" + str(Gaussian_length2) + " Gaussian generated price paths\nfor Sweden")
plt.hist(stats.kurtosis(Gaussian_diff_attempt2), bins=100, color='goldenrod', alpha = 0.5, label = "Sweden")
# plt.axvline(stats.kurtosis(df.LN_SWEDEN[1:-1]), linestyle='--', color="black", label="real kurtosis\n= "+str(round(stats.kurtosis(df.LN_SWEDEN[1:-1]), 3)))
# plt.axvline(0, linestyle='--', color="aqua", label="Gaussian kurtosis = 0")

# kurtosis_array_Gaussian_diff_attempt2 = stats.kurtosis(Gaussian_diff_attempt2)
# plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2, 0.05), linestyle='--', color="purple", label="5th percentile")
# plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2,0.25), linestyle='--', color="blue", label="25th percentile")
# plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2,0.5), linestyle='--', color="forestgreen", label="50th percentile")
# plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2,0.75), linestyle='--', color="gold", label="75th percentile")
# plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt2,0.95), linestyle='--', color="red", label="95th percentile")


# LIBOR GRAPHS
plt.grid(b=True)
plt.title("Kurtosis of price changes for \n" + str(Gaussian_length3) + " Gaussian generated price paths\nfor Norway")
plt.hist(stats.kurtosis(Gaussian_diff_attempt3), bins=100, color='cornflowerblue', alpha = 0.5, label = "LIBOR")
# plt.axvline(stats.kurtosis(df.LN_LIBOR[1:-1]), linestyle='--', color="black", label="real kurtosis\n= "+str(round(stats.kurtosis(df.LN_LIBOR[1:-1]), 3)))
# plt.axvline(0, linestyle='--', color="aqua", label="Gaussian kurtosis = 0")

# kurtosis_array_Gaussian_diff_attempt3 = stats.kurtosis(Gaussian_diff_attempt3)
# plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3, 0.05), linestyle='--', color="purple", label="5th percentile")
# plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3,0.25), linestyle='--', color="blue", label="25th percentile")
# plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3,0.5), linestyle='--', color="forestgreen", label="50th percentile")
# plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3,0.75), linestyle='--', color="gold", label="75th percentile")
# plt.axvline(np.quantile(kurtosis_array_Gaussian_diff_attempt3,0.95), linestyle='--', color="red", label="95th percentile")


# PLT.SHOW
plt.ylabel("Frequency per " + str(Gaussian_length3))
plt.xlabel("Kurtosis")
plt.legend()
plt.show()
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [180]:
print("Our graphs before were a bit messy.\n\nLet's make them messier by adding a direct comparison to the Gaussian on the same graph.")
Our graphs before were a bit messy.

Let's make them messier by adding a direct comparison to the Gaussian on the same graph.
In [179]:
plt.grid(b=True)
plt.title("Histogram of simulated final prices\nafter 8192 days for Norway")
plt.hist(Gaussian_Norway.iloc[-1], bins=40, color="aqua", alpha = 0.2, label = "Gaussian comparison")
plt.hist(df.Price_NORWAY[0]*np.exp(attempt.iloc[8191]), bins=40, color="orangered")
plt.xlabel("Final price (USD/NOK)")
plt.ylabel("Frequency")
plt.axvline(df.Price_NORWAY[0]*np.exp(attempt.iloc[8191].quantile(0.05)), linestyle='--', color="purple", label="5th percentile")
plt.axvline(df.Price_NORWAY[0]*np.exp(attempt.iloc[8191].quantile(0.25)), linestyle='--', color="blue", label="25th percentile")
plt.axvline(df.Price_NORWAY[0]*np.exp(attempt.iloc[8191].quantile(0.5)), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(df.Price_NORWAY[0]*np.exp(attempt.iloc[8191].quantile(0.75)), linestyle='--', color="gold", label="75th percentile")
plt.axvline(df.Price_NORWAY[0]*np.exp(attempt.iloc[8191].quantile(0.95)), linestyle='--', color="red", label="95th percentile")
plt.axvline(df.Price_NORWAY.iloc[-1], linestyle='--', color="black", label="real final price", linewidth=0.7)
plt.legend()
plt.show()
In [202]:
plt.grid(b=True)
plt.title("Mean price change for \n" + str(attempt_length) + " MMAR generated price paths\nfor NORWAY")
plt.hist(diff_attempt.mean(), bins=30, color='orangered')
plt.hist(Gaussian_diff_attempt.mean(), bins=60, color='aqua', alpha = 0.2, label = "Gaussian comparison")
plt.axvline(df.LN_NORWAY.mean(), linestyle='--', color="black", label="real mean\n= "+str(round(df.LN_NORWAY.mean(), 6)))
plt.axvline(0, linestyle='--', color="forestgreen", label="mean = zero")
plt.ylabel("Frequency per " + str(attempt_length))
plt.xlabel("Mean")
plt.legend()
plt.show()
print((diff_attempt.mean()).describe())

plt.grid(b=True)
plt.title("Standard deviation of price changes for \n" + str(attempt_length) + " MMAR generated price paths\nfor NORWAY")
plt.hist(diff_attempt.std(), bins=30, color='orangered')
plt.axvline(df.LN_NORWAY.std(), linestyle='--', color="black", label="real standard deviation\n= "+str(round(df.LN_NORWAY.std(), 5)))
plt.ylabel("Frequency per " + str(attempt_length))
plt.xlabel("Standard Deviation")
plt.hist(Gaussian_diff_attempt.std(), bins=15, color='aqua', alpha = 0.2, label = "Gaussian comparison")
plt.legend()
plt.show()


print("\n\nBelow is a description of the standard deviations of our 10,000 simulations.\n")
(diff_attempt.std()).describe()
count    1.000000e+04
mean     4.783070e-07
std      4.479205e-05
min     -1.606284e-04
25%     -2.944317e-05
50%      5.766159e-07
75%      3.035007e-05
max      1.811756e-04
dtype: float64

Below is a description of the standard deviations of our 10,000 simulations.

Out[202]:
count    10000.000000
mean         0.007215
std          0.000106
min          0.006693
25%          0.007148
50%          0.007218
75%          0.007286
max          0.007604
dtype: float64
In [161]:
plt.grid(b=True)
plt.title("Histogram of simulated final prices\nafter 8192 days for Sweden")
plt.hist(df.Price_SWEDEN[0]*np.exp(attempt2.iloc[8191]), bins=40, color="goldenrod")
plt.hist(Gaussian_Sweden.iloc[-1], bins=40, color="aqua", alpha = 0.2, label = "Gaussian comparison")
plt.xlabel("Final price (SEK)")
plt.ylabel("Frequency")
plt.axvline(df.Price_SWEDEN[0]*np.exp(attempt2.iloc[8191].quantile(0.05)), linestyle='--', color="purple", label="5th percentile")
plt.axvline(df.Price_SWEDEN[0]*np.exp(attempt2.iloc[8191].quantile(0.25)), linestyle='--', color="blue", label="25th percentile")
plt.axvline(df.Price_SWEDEN[0]*np.exp(attempt2.iloc[8191].quantile(0.5)), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(df.Price_SWEDEN[0]*np.exp(attempt2.iloc[8191].quantile(0.75)), linestyle='--', color="gold", label="75th percentile")
plt.axvline(df.Price_SWEDEN[0]*np.exp(attempt2.iloc[8191].quantile(0.95)), linestyle='--', color="red", label="95th percentile")
plt.axvline(df.Price_SWEDEN.iloc[-1], linestyle='--', color="black", label="real final price")

plt.legend()
plt.show()
In [204]:
plt.grid(b=True)
plt.title("Mean price change for \n" + str(attempt2_length) + " MMAR generated price paths\nfor SWEDEN")
plt.hist(diff_attempt2.mean(), bins=30, color='goldenrod')
plt.hist(Gaussian_diff_attempt2.mean(), bins=30, color='aqua', alpha = 0.2, label = "Gaussian comparison")
plt.axvline(df.LN_SWEDEN.mean(), linestyle='--', color="black", label="real mean\n= "+str(round(df.LN_SWEDEN.mean(), 6)))
plt.axvline(0, linestyle='--', color="forestgreen", label="mean = zero")
plt.ylabel("Frequency per " + str(attempt2_length))
plt.xlabel("Mean")
plt.legend()
plt.show()
print((diff_attempt2.mean()).describe())

plt.grid(b=True)
plt.title("Standard deviation of price changes for \n" + str(attempt2_length) + " MMAR generated price paths\nfor SWEDEN")
plt.hist(diff_attempt2.std(), bins=30, color='goldenrod')
plt.hist(Gaussian_diff_attempt2.std(), bins=30, color='aqua', alpha = 0.2, label = "Gaussian comparison")
plt.axvline(df.LN_SWEDEN.std(), linestyle='--', color="black", label="real standard deviation\n= "+str(round(df.LN_SWEDEN.std(), 4)))
plt.ylabel("Frequency per " + str(attempt2_length))
plt.xlabel("Standard Deviation")
plt.legend()
plt.show()


print("\n\nBelow is a description of the standard deviations of our " + str(attempt2_length) + " simulations.\n")
(diff_attempt2.std()).describe()
count    10000.000000
mean         0.000264
std          0.000102
min         -0.000031
25%          0.000193
50%          0.000275
75%          0.000347
max          0.000455
dtype: float64

Below is a description of the standard deviations of our 10000 simulations.

Out[204]:
count    10000.000000
mean         0.014604
std          0.000185
min          0.013831
25%          0.014475
50%          0.014601
75%          0.014723
max          0.015437
dtype: float64
In [169]:
plt.grid(b=True)
plt.title("Histogram of simulated final yields\nafter 8192 days for LIBOR")
plt.hist(df.Price_LIBOR[0]*np.exp(attempt3.iloc[8191]), bins=40, color="cornflowerblue")
plt.hist(Gaussian_LIBOR.iloc[-1], bins=40, color="aqua", alpha = 0.2, label = "Gaussian comparison")
plt.xlabel("Final yield (%)")
plt.ylabel("Frequency")
plt.axvline(df.Price_LIBOR[0]*np.exp(attempt3.iloc[8191].quantile(0.05)), linestyle='--', color="purple", label="5th percentile")
plt.axvline(df.Price_LIBOR[0]*np.exp(attempt3.iloc[8191].quantile(0.25)), linestyle='--', color="blue", label="25th percentile")
plt.axvline(df.Price_LIBOR[0]*np.exp(attempt3.iloc[8191].quantile(0.5)), linestyle='--', color="forestgreen", label="50th percentile")
plt.axvline(df.Price_LIBOR[0]*np.exp(attempt3.iloc[8191].quantile(0.75)), linestyle='--', color="gold", label="75th percentile")
plt.axvline(df.Price_LIBOR[0]*np.exp(attempt3.iloc[8191].quantile(0.95)), linestyle='--', color="red", label="95th percentile")
plt.axvline(df.Price_LIBOR.iloc[-1], linestyle='--', color="black", label="real final price")
plt.legend()
plt.show()
In [172]:
plt.grid(b=True)
plt.title("Mean yield change for \n" + str(attempt3_length) + " MMAR generated price paths\nfor 12-month LIBOR")
plt.hist(diff_attempt3.mean(), bins=30, color='cornflowerblue')
plt.hist(Gaussian_diff_attempt3.mean(), bins=30, color='aqua', alpha = 0.2, label = "Gaussian comparison")
plt.axvline(df.LN_LIBOR.mean(), linestyle='--', color="black", label="real mean\n= "+str(round(df.LN_LIBOR.mean(), 6)))
plt.axvline(0, linestyle='--', color="forestgreen", label="mean = zero")
plt.ylabel("Frequency per " + str(attempt3_length))
plt.xlabel("Mean")
plt.legend()
plt.show()
print((diff_attempt3.mean()).describe())

plt.grid(b=True)
plt.title("Standard deviation of yield changes for \n" + str(attempt3_length) + " MMAR generated price paths\nfor 12-month LIBOR")
plt.hist(diff_attempt3.std(), bins=30, color='cornflowerblue')
plt.hist(Gaussian_diff_attempt3.std(), bins=30, color='aqua', alpha = 0.2, label = "Gaussian comparison")
plt.axvline(df.LN_LIBOR.std(), linestyle='--', color="black", label="real standard deviation\n= "+str(round(df.LN_LIBOR.std(), 5)))
plt.ylabel("Frequency per " + str(attempt3_length))
plt.xlabel("Standard Deviation")
plt.legend()
plt.show()


print("\n\nBelow is a description of the standard deviations of our " + str(attempt3_length) + " simulations.\n")
print((diff_attempt3.std()).describe())
count    10000.000000
mean        -0.000332
std          0.000150
min         -0.000593
25%         -0.000453
50%         -0.000342
75%         -0.000222
max          0.000053
dtype: float64

Below is a description of the standard deviations of our 10000 simulations.

count    10000.000000
mean         0.008401
std          0.000306
min          0.007613
25%          0.008198
50%          0.008361
75%          0.008557
max          0.011558
dtype: float64
In [ ]:
 
In [ ]: